File: octomap-master/octovis/src/extern/QGLViewer/VRender/Vector2.h

/*
This file is part of the VRender library.
Copyright (C) 2005 Cyril Soler ([email protected])
Version 1.0.0, released on June 27, 2005.
http://artis.imag.fr/Members/Cyril.Soler/VRender
VRender is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
VRender is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VRender; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/****************************************************************************
Copyright (C) 2002-2014 Gilles Debunne. All rights reserved.
This file is part of the QGLViewer library version 2.6.3.
http://www.libqglviewer.com - [email protected]
This file may be used under the terms of the GNU General Public License
versions 2.0 or 3.0 as published by the Free Software Foundation and
appearing in the LICENSE file included in the packaging of this file.
In addition, as a special exception, Gilles Debunne gives you certain
additional rights, described in the file GPL_EXCEPTION in this package.
libQGLViewer uses dual licensing. Commercial/proprietary software must
purchase a libQGLViewer Commercial License.
This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*****************************************************************************/
#ifndef _VRENDER_VECTOR2_H
#define _VRENDER_VECTOR2_H
#include <stdexcept>
#include <iostream>
namespace vrender
{
class Vector3;
class Vector2
{
public:
// ---------------------------------------------------------------------------
//! @name Constant
//@{
static const Vector2 inf;
//@}
// ---------------------------------------------------------------------------
//! @name Constructor(s) and destructor
//@{
Vector2 ();
~Vector2 ();
Vector2 (const Vector2&);
Vector2 (const Vector3& u);
Vector2 (double,double);
//@}
// ---------------------------------------------------------------------------
//! @name Access methods
//@{
inline double x() const { return _xyz[0]; }
inline double y() const { return _xyz[1]; }
inline void setX(double r) { _xyz[0] = r; }
inline void setY(double r) { _xyz[1] = r; }
inline void setXY (double x,double y) { _xyz[0] = x; _xyz[1] = y; }
//@}
// ---------------------------------------------------------------------------
//! @name Assignment
//@{
inline Vector2& operator= (const Vector2& u) { _xyz[0] = u._xyz[0]; _xyz[1] = u._xyz[1]; return *this; }
//@}
// ---------------------------------------------------------------------------
//! @name Comparisons
//@{
friend bool operator== (const Vector2&,const Vector2&);
friend bool operator!= (const Vector2&,const Vector2&);
//@}
// ---------------------------------------------------------------------------
//! @name Algebraic operations
//@{
inline Vector2& operator+= (const Vector2& v)
{
_xyz[0] += v._xyz[0];
_xyz[1] += v._xyz[1];
return *this;
}
inline Vector2& operator-= (const Vector2& v)
{
_xyz[0] -= v._xyz[0];
_xyz[1] -= v._xyz[1];
return *this;
}
inline Vector2& operator*= (double f) { _xyz[0] *= f; _xyz[1] *= f; return *this;}
inline Vector2& operator/= (double f) { _xyz[0] /= f; _xyz[1] /= f; return *this;}
friend Vector2 operator- (const Vector2&);
static Vector2 mini(const Vector2&,const Vector2&) ;
static Vector2 maxi(const Vector2&,const Vector2&) ;
inline Vector2 operator+(const Vector2& u) const
{
return Vector2(_xyz[0]+u._xyz[0],_xyz[1]+u._xyz[1]);
}
inline Vector2 operator-(const Vector2& u) const
{
return Vector2(_xyz[0]-u._xyz[0],_xyz[1]-u._xyz[1]);
}
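//! Dot product (scalar product) of two Vector2.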
inline double operator*(const Vector2& u) const
{
return _xyz[0]*u._xyz[0] + _xyz[1]*u._xyz[1] ;
}
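//! 2D cross product: the z-component of the 3D cross product, i.e. the signed area of the parallelogram spanned by the two vectors.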
inline double operator^(const Vector2& v) const
{
return _xyz[0]*v._xyz[1] - _xyz[1]*v._xyz[0] ;
}
Vector2 operator/ (double v) { return Vector2(_xyz[0]/v,_xyz[1]/v); }
Vector2 operator* (double v) { return Vector2(_xyz[0]*v,_xyz[1]*v); }
friend Vector2 operator* (double,const Vector2&);
//@}
// ---------------------------------------------------------------------------
//! @name Metrics
//@{
double norm () const;
double squareNorm () const;
double infNorm () const; //!< Should be used for most comparisons, for efficiency reasons.
//@}
// ---------------------------------------------------------------------------
//! @name Stream overrides
//@{
friend std::ostream& operator<< (std::ostream&,const Vector2&);
//@}
double operator[] (int i) const
{
if((i < 0)||(i > 1))
throw std::runtime_error("Out of bounds in Vector2::operator[]") ;
return _xyz[i];
}
double& operator[] (int i)
{
if((i < 0)||(i > 1))
throw std::runtime_error("Out of bounds in Vector2::operator[]") ;
return _xyz[i];
}
private:
double _xyz[2]; //!< The 2 vector components
}; // interface of Vector2
}
#endif // _VRENDER_VECTOR2_H
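
A minimal usage sketch for the interface above (illustrative only, not part of the octomap sources; it assumes Vector2.cpp, which defines the stream and comparison friends, is linked in):

#include "Vector2.h"
#include <iostream>

int main()
{
    vrender::Vector2 a(1.0, 2.0);
    vrender::Vector2 b(3.0, -1.0);

    vrender::Vector2 sum = a + b;   // component-wise addition -> (4, 1)
    double dot  = a * b;            // dot product: 1*3 + 2*(-1) = 1
    double area = a ^ b;            // 2D cross product: 1*(-1) - 2*3 = -7

    std::cout << sum << " " << dot << " " << area << std::endl;
    return 0;
}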

File: octomap-master/octovis/src/extern/QGLViewer/VRender/Vector3.cpp

/*
This file is part of the VRender library.
Copyright (C) 2005 Cyril Soler ([email protected])
Version 1.0.0, released on June 27, 2005.
http://artis.imag.fr/Members/Cyril.Soler/VRender
VRender is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
VRender is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VRender; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/****************************************************************************
Copyright (C) 2002-2014 Gilles Debunne. All rights reserved.
This file is part of the QGLViewer library version 2.6.3.
http://www.libqglviewer.com - [email protected]
This file may be used under the terms of the GNU General Public License
versions 2.0 or 3.0 as published by the Free Software Foundation and
appearing in the LICENSE file included in the packaging of this file.
In addition, as a special exception, Gilles Debunne gives you certain
additional rights, described in the file GPL_EXCEPTION in this package.
libQGLViewer uses dual licensing. Commercial/proprietary software must
purchase a libQGLViewer Commercial License.
This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*****************************************************************************/
#include <iostream>
#include <float.h>   // for FLT_MAX; must precede Vector3.h, which otherwise defines a fallback
#include "Vector3.h"
#include "NVector3.h"
#include <math.h>
#include <algorithm>
#ifdef WIN32
# include <windows.h>
#endif
using namespace vrender;
using namespace std;
const Vector3 Vector3::inf(FLT_MAX, FLT_MAX, FLT_MAX);
Vector3::Vector3 ()
{
_xyz[0] = 0.0;
_xyz[1] = 0.0;
_xyz[2] = 0.0;
}
// -----------------------------------------------------------------------------
//! Default destructor
Vector3::~Vector3 ()
{
}
// -----------------------------------------------------------------------------
//! Copy constructor
Vector3::Vector3 (const Vector3& u)
{
setXYZ(u[0],u[1],u[2]);
}
// -----------------------------------------------------------------------------
//! Copy constructor from a normalized vector
Vector3::Vector3 (const NVector3& u)
{
setXYZ(u[0],u[1],u[2]);
}
// -----------------------------------------------------------------------------
//! Create a vector from real values
Vector3::Vector3 (double x,double y,double z)
{
setXYZ(x,y,z);
}
// -----------------------------------------------------------------------------
//! Assignment with a normalized vector
Vector3& Vector3::operator= (const NVector3& u)
{
_xyz[0] = u[0];
_xyz[1] = u[1];
_xyz[2] = u[2];
return ( *this );
}
// -----------------------------------------------------------------------------
//! Self addition with a normalized vector
Vector3& Vector3::operator+= (const NVector3& u)
{
_xyz[0] += u[0];
_xyz[1] += u[1];
_xyz[2] += u[2];
return ( *this );
}
// -----------------------------------------------------------------------------
//! Self subtraction with a normalized vector
Vector3& Vector3::operator-= (const NVector3& u)
{
_xyz[0] -= u[0];
_xyz[1] -= u[1];
_xyz[2] -= u[2];
return ( *this );
}
// -----------------------------------------------------------------------------
//! Left multiplication by a real value
Vector3 vrender::operator* (double r,const Vector3& u)
{
return ( Vector3(r*u[0], r*u[1], r*u[2]) );
}
// -----------------------------------------------------------------------------
//! Norm
double Vector3::norm () const
{
return sqrt( _xyz[0]*_xyz[0] + _xyz[1]*_xyz[1] + _xyz[2]*_xyz[2] );
}
// -----------------------------------------------------------------------------
//! Square norm (self dot product)
double Vector3::squareNorm () const
{
return _xyz[0]*_xyz[0] + _xyz[1]*_xyz[1] + _xyz[2]*_xyz[2];
}
// -----------------------------------------------------------------------------
//! Infinite norm
double Vector3::infNorm() const
{
return std::max(std::max(fabs(_xyz[0]),fabs(_xyz[1])),fabs(_xyz[2])) ;
}
// -----------------------------------------------------------------------------
//! Out stream override: prints the 3 vector components
std::ostream& vrender::operator<< (std::ostream& out,const Vector3& u)
{
out << u[0] << " " << u[1] << " " << u[2];
return ( out );
}
Vector3 Vector3::mini(const Vector3& v1,const Vector3& v2)
{
return Vector3(std::min(v1[0],v2[0]),std::min(v1[1],v2[1]),std::min(v1[2],v2[2])) ;
}
Vector3 Vector3::maxi(const Vector3& v1,const Vector3& v2)
{
return Vector3(std::max(v1[0],v2[0]),std::max(v1[1],v2[1]),std::max(v1[2],v2[2])) ;
}

File: octomap-master/octovis/src/extern/QGLViewer/VRender/Vector3.h

/*
This file is part of the VRender library.
Copyright (C) 2005 Cyril Soler ([email protected])
Version 1.0.0, released on June 27, 2005.
http://artis.imag.fr/Members/Cyril.Soler/VRender
VRender is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
VRender is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VRender; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/****************************************************************************
Copyright (C) 2002-2014 Gilles Debunne. All rights reserved.
This file is part of the QGLViewer library version 2.6.3.
http://www.libqglviewer.com - [email protected]
This file may be used under the terms of the GNU General Public License
versions 2.0 or 3.0 as published by the Free Software Foundation and
appearing in the LICENSE file included in the packaging of this file.
In addition, as a special exception, Gilles Debunne gives you certain
additional rights, described in the file GPL_EXCEPTION in this package.
libQGLViewer uses dual licensing. Commercial/proprietary software must
purchase a libQGLViewer Commercial License.
This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*****************************************************************************/
#ifndef _VRENDER_VECTOR3_H
#define _VRENDER_VECTOR3_H
#include <stdexcept>
#ifndef FLT_MAX
# define FLT_MAX 9.99E20f
#endif
namespace vrender
{
class NVector3;
class Vector3
{
public:
// ---------------------------------------------------------------------------
//! @name Constant
//@{
static const Vector3 inf;
//@}
// ---------------------------------------------------------------------------
//! @name Constructor(s) and destructor
//@{
Vector3 ();
~Vector3 ();
Vector3 (const Vector3&);
Vector3 (const NVector3&);
Vector3 (double, double, double);
//@}
// ---------------------------------------------------------------------------
//! @name Access methods
//@{
inline double x() const { return _xyz[0]; }
inline double y() const { return _xyz[1]; }
inline double z() const { return _xyz[2]; }
inline void setX(double r) { _xyz[0] = r; }
inline void setY(double r) { _xyz[1] = r; }
inline void setZ(double r) { _xyz[2] = r; }
inline void setXYZ (double x,double y,double z) { _xyz[0] = x; _xyz[1] = y; _xyz[2] = z; }
//@}
// ---------------------------------------------------------------------------
//! @name Assignment
//@{
inline Vector3& operator= (const Vector3& u) { _xyz[0] = u._xyz[0]; _xyz[1] = u._xyz[1]; _xyz[2] = u._xyz[2]; return *this; }
Vector3& operator= (const NVector3& u);
//@}
// ---------------------------------------------------------------------------
//! @name Comparisons
//@{
friend bool operator== (const Vector3&,const Vector3&);
friend bool operator!= (const Vector3&,const Vector3&);
//@}
// ---------------------------------------------------------------------------
//! @name Algebraic operations
//@{
inline Vector3& operator+= (const Vector3& v)
{
_xyz[0] += v._xyz[0];
_xyz[1] += v._xyz[1];
_xyz[2] += v._xyz[2];
return *this;
}
inline Vector3& operator-= (const Vector3& v)
{
_xyz[0] -= v._xyz[0];
_xyz[1] -= v._xyz[1];
_xyz[2] -= v._xyz[2];
return *this;
}
inline Vector3& operator*= (double f) { _xyz[0] *= f; _xyz[1] *= f; _xyz[2] *= f; return *this;}
inline Vector3& operator/= (double f) { _xyz[0] /= f; _xyz[1] /= f; _xyz[2] /= f; return *this;}
static Vector3 mini(const Vector3&,const Vector3&) ;
static Vector3 maxi(const Vector3&,const Vector3&) ;
Vector3& operator-= (const NVector3&);
Vector3& operator+= (const NVector3&);
friend Vector3 operator- (const Vector3& u) { return Vector3(-u[0], -u[1], -u[2]); }
inline Vector3 operator+(const Vector3& u) const
{
return Vector3(_xyz[0]+u._xyz[0],_xyz[1]+u._xyz[1],_xyz[2]+u._xyz[2]);
}
inline Vector3 operator-(const Vector3& u) const
{
return Vector3(_xyz[0]-u._xyz[0],_xyz[1]-u._xyz[1],_xyz[2]-u._xyz[2]);
}
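//! Dot product (scalar product) of two Vector3.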
inline double operator*(const Vector3& u) const
{
return _xyz[0]*u._xyz[0] + _xyz[1]*u._xyz[1] + _xyz[2]*u._xyz[2];
}
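//! Cross product of two Vector3.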
inline Vector3 operator^(const Vector3& v) const
{
return Vector3( _xyz[1]*v._xyz[2] - _xyz[2]*v._xyz[1],
_xyz[2]*v._xyz[0] - _xyz[0]*v._xyz[2],
_xyz[0]*v._xyz[1] - _xyz[1]*v._xyz[0]);
}
Vector3 operator/ (double v) { return Vector3(_xyz[0]/v,_xyz[1]/v,_xyz[2]/v); }
Vector3 operator* (double v) { return Vector3(_xyz[0]*v,_xyz[1]*v,_xyz[2]*v); }
friend Vector3 operator* (double,const Vector3&);
//@}
// ---------------------------------------------------------------------------
//! @name Metrics
//@{
double norm () const;
double squareNorm () const;
double infNorm () const; //!< Should be used for most comparisons, for efficiency reasons.
//@}
// ---------------------------------------------------------------------------
//! @name Stream overrides
//@{
friend std::ostream& operator<< (std::ostream&,const Vector3&);
//@}
double operator[] (int i) const
{
if((i < 0)||(i > 2))
throw std::runtime_error("Out of bounds in Vector3::operator[]") ;
return _xyz[i];
}
double& operator[] (int i)
{
if((i < 0)||(i > 2))
throw std::runtime_error("Out of bounds in Vector3::operator[]") ;
return _xyz[i];
}
private:
double _xyz[3]; //!< The 3 vector components
}; // interface of Vector3
}
#endif // _VRENDER_VECTOR3_H
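
A matching sketch for Vector3 (again illustrative, not part of the original source; note the header's own advice that infNorm(), a max of absolute components, is the cheap alternative to norm() for comparisons):

#include "Vector3.h"
#include <iostream>

int main()
{
    vrender::Vector3 x(1.0, 0.0, 0.0);
    vrender::Vector3 y(0.0, 1.0, 0.0);

    vrender::Vector3 z = x ^ y;     // cross product -> (0, 0, 1)
    double d = x * y;               // dot product -> 0

    // norm() takes a square root; infNorm() is max(|x|, |y|, |z|).
    std::cout << z << " " << d << " " << z.norm() << " " << z.infNorm() << std::endl;
    return 0;
}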

File: octomap-master/octovis/src/extern/QGLViewer/VRender/VisibilityOptimizer.cpp

/*
This file is part of the VRender library.
Copyright (C) 2005 Cyril Soler ([email protected])
Version 1.0.0, released on June 27, 2005.
http://artis.imag.fr/Members/Cyril.Soler/VRender
VRender is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
VRender is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VRender; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/****************************************************************************
Copyright (C) 2002-2014 Gilles Debunne. All rights reserved.
This file is part of the QGLViewer library version 2.6.3.
http://www.libqglviewer.com - [email protected]
This file may be used under the terms of the GNU General Public License
versions 2.0 or 3.0 as published by the Free Software Foundation and
appearing in the LICENSE file included in the packaging of this file.
In addition, as a special exception, Gilles Debunne gives you certain
additional rights, described in the file GPL_EXCEPTION in this package.
libQGLViewer uses dual licensing. Commercial/proprietary software must
purchase a libQGLViewer Commercial License.
This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*****************************************************************************/
#include <vector>
#include "VRender.h"
#include "Optimizer.h"
#include "Primitive.h"
#include "gpc.h"
#include "math.h"
using namespace vrender ;
using namespace std ;
#ifdef A_FAIRE
void VisibilityOptimizer::optimize(vector<PtrPrimitive>& primitives,float& percentage_finished,string& message)
#else
void VisibilityOptimizer::optimize(vector<PtrPrimitive>& primitives,VRenderParams& vparams)
#endif
{
#ifdef DEBUG_VO
cout << "Optimizing visibility." << endl ;
#endif
unsigned long N = primitives.size()/200 + 1 ;
#ifdef DEBUG_EPSRENDER__SHOW1
// cout << "Showing viewer." << endl ;
// myViewer viewer ;
// viewer.show();
double minx = FLT_MAX ;
double miny = FLT_MAX ;
double maxx = -FLT_MAX ;
double maxy = -FLT_MAX ;
for(unsigned int i=0;i<primitives.size();++i)
for(int j=0;j<primitives[i]->nbVertices();++j)
{
if(maxx < primitives[i]->vertex(j).x()) maxx = primitives[i]->vertex(j).x() ;
if(maxy < primitives[i]->vertex(j).y()) maxy = primitives[i]->vertex(j).y() ;
if(minx > primitives[i]->vertex(j).x()) minx = primitives[i]->vertex(j).x() ;
if(miny > primitives[i]->vertex(j).y()) miny = primitives[i]->vertex(j).y() ;
}
glMatrixMode(GL_PROJECTION) ;
glLoadIdentity() ;
glOrtho(minx,maxx,miny,maxy,-1,1) ;
glMatrixMode(GL_MODELVIEW) ;
glLoadIdentity() ;
cout << "Window set to " << minx << " " << maxx << " " << miny << " " << maxy << endl ;
glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT) ;
glLineWidth(3.0) ;
#endif
int nb_culled = 0 ;
// This would be quite a bit nicer with a C++ interface...
gpc_polygon cumulated_union ;
cumulated_union.num_contours = 0 ;
cumulated_union.hole = NULL ;
cumulated_union.contour = NULL ;
size_t nboptimised = 0 ;
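// The primitives are assumed to be depth-sorted by an earlier optimizer
// pass, so walking the array from the back processes them front to back:
// a primitive whose slightly shrunk outline is fully covered by the union
// of the primitives handled before it is invisible and gets culled. The
// long(pindex) cast lets the unsigned counter exit the loop once it wraps.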
for(size_t pindex = primitives.size() - 1; long(pindex) >= 0;--pindex,++nboptimised)
if(primitives[pindex] != NULL)
{
#ifdef A_FAIRE
percentage_finished = pindex / (float)primitives.size() ;
#endif
if(primitives[pindex]->nbVertices() > 1)
{
#ifdef DEBUG_VO
if(pindex%50==0)
{
char buff[500] ;
sprintf(buff,"Left: % 6ld - Culled: % 6ld", (long)pindex,(long)nb_culled) ;
fputs(buff,stdout); /* buff is data, not a format string */
for(unsigned int j=0;j<strlen(buff);++j)
fprintf(stdout,"\b") ;
fflush(stdout) ;
}
#endif
try
{
PtrPrimitive p(primitives[pindex]) ;
gpc_polygon difference ;
gpc_polygon new_poly ;
gpc_polygon new_poly_reduced ;
new_poly.num_contours = 0 ;
new_poly.hole = NULL ;
new_poly.contour = NULL ;
new_poly_reduced.num_contours = 0 ;
new_poly_reduced.hole = NULL ;
new_poly_reduced.contour = NULL ;
// 1 - creates a gpc_polygon corresponding to the current primitive
gpc_vertex_list *new_poly_verts = new gpc_vertex_list ;
gpc_vertex_list *new_poly_reduced_verts = new gpc_vertex_list ;
double mx = 0.0 ;
double my = 0.0 ;
if(p->nbVertices() == 2)
{
new_poly_verts->num_vertices = 4 ;
new_poly_verts->vertex = new gpc_vertex[4] ;
new_poly_reduced_verts->num_vertices = 4 ;
new_poly_reduced_verts->vertex = new gpc_vertex[4] ;
double deps = 0.001 ;
double du = p->vertex(1).y()-p->vertex(0).y() ;
double dv = p->vertex(1).x()-p->vertex(0).x() ;
double n = sqrt(du*du+dv*dv) ;
du *= deps/n ;
dv *= deps/n ;
new_poly_verts->vertex[0].x = p->vertex(0).x() + du ;
new_poly_verts->vertex[0].y = p->vertex(0).y() + dv ;
new_poly_verts->vertex[1].x = p->vertex(1).x() + du ;
new_poly_verts->vertex[1].y = p->vertex(1).y() + dv ;
new_poly_verts->vertex[2].x = p->vertex(1).x() - du ;
new_poly_verts->vertex[2].y = p->vertex(1).y() - dv ;
new_poly_verts->vertex[3].x = p->vertex(0).x() - du ;
new_poly_verts->vertex[3].y = p->vertex(0).y() - dv ;
new_poly_reduced_verts->vertex[0].x = p->vertex(0).x() + du ;
new_poly_reduced_verts->vertex[0].y = p->vertex(0).y() + dv ;
new_poly_reduced_verts->vertex[1].x = p->vertex(1).x() + du ;
new_poly_reduced_verts->vertex[1].y = p->vertex(1).y() + dv ;
new_poly_reduced_verts->vertex[2].x = p->vertex(1).x() - du ;
new_poly_reduced_verts->vertex[2].y = p->vertex(1).y() - dv ;
new_poly_reduced_verts->vertex[3].x = p->vertex(0).x() - du ;
new_poly_reduced_verts->vertex[3].y = p->vertex(0).y() - dv ;
}
else
{
new_poly_verts->num_vertices = p->nbVertices() ;
new_poly_verts->vertex = new gpc_vertex[p->nbVertices()] ;
for(size_t i=0;i<p->nbVertices();++i)
{
new_poly_verts->vertex[i].x = p->vertex(i).x() ;
new_poly_verts->vertex[i].y = p->vertex(i).y() ;
mx += p->vertex(i).x() ;
my += p->vertex(i).y() ;
}
mx /= p->nbVertices() ;
my /= p->nbVertices() ;
new_poly_reduced_verts->num_vertices = p->nbVertices() ;
new_poly_reduced_verts->vertex = new gpc_vertex[p->nbVertices()] ;
for(size_t j=0;j<p->nbVertices();++j)
{
new_poly_reduced_verts->vertex[j].x = mx + (p->vertex(j).x() - mx)*0.999 ;
new_poly_reduced_verts->vertex[j].y = my + (p->vertex(j).y() - my)*0.999 ;
}
}
gpc_add_contour(&new_poly,new_poly_verts,false) ;
gpc_add_contour(&new_poly_reduced,new_poly_reduced_verts,false) ;
// 2 - computes the difference between this polygon, and the union of the
// preceding ones.
gpc_polygon_clip(GPC_DIFF,&new_poly_reduced,&cumulated_union,&difference) ;
// 3 - checks the difference. If void, the primitive is not visible: skip it
// and go to next primitive.
if(difference.num_contours == 0)
{
++nb_culled ;
delete p ;
primitives[pindex] = NULL ;
continue ;
}
// 4 - The primitive is visible. Let's add it to the cumulated union of
// primitives.
if(p->nbVertices() > 2)
{
gpc_polygon cumulated_union_tmp ;
cumulated_union_tmp.num_contours = 0 ;
cumulated_union_tmp.hole = NULL ;
cumulated_union_tmp.contour = NULL ;
gpc_polygon_clip(GPC_UNION,&new_poly,&cumulated_union,&cumulated_union_tmp) ;
gpc_free_polygon(&cumulated_union) ;
cumulated_union = cumulated_union_tmp ;
}
gpc_free_polygon(&new_poly) ;
gpc_free_polygon(&new_poly_reduced) ;
gpc_free_polygon(&difference) ;
#ifdef DEBUG_EPSRENDER__SHOW1
glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT) ;
glColor3f(1.0,0.0,0.0) ;
for(unsigned long i=0;i<cumulated_union.num_contours;++i)
{
glBegin(GL_LINE_LOOP) ;
for(unsigned long j=0;j<cumulated_union.contour[i].num_vertices;++j)
glVertex2f(cumulated_union.contour[i].vertex[j].x,cumulated_union.contour[i].vertex[j].y) ;
glEnd() ;
}
glFlush() ;
glXSwapBuffers(glXGetCurrentDisplay(),glXGetCurrentDrawable()) ;
#endif
}
catch(exception& )
{
; // std::cout << "Could not treat primitive " << pindex << ": internal gpc error." << endl ;
}
}
if(nboptimised%N==0)
vparams.progress(nboptimised/(float)primitives.size(), QGLViewer::tr("Visibility optimization")) ;
}
#ifdef DEBUG_VO
cout << nb_culled << " primitives culled over " << primitives.size() << "." << endl ;
#endif
gpc_free_polygon(&cumulated_union) ;
}
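
Before the GPC implementation itself, a small standalone sketch of the public API the optimizer relies on above (illustrative only; the square coordinates are made up, but the gpc_* types and calls are the ones from gpc.h used in this file):

#include "gpc.h"
#include <stdio.h>

int main()
{
    // Two unit squares, the second shifted by (0.5, 0.5).
    gpc_vertex sq1[4] = { {0.0, 0.0}, {1.0, 0.0}, {1.0, 1.0}, {0.0, 1.0} };
    gpc_vertex sq2[4] = { {0.5, 0.5}, {1.5, 0.5}, {1.5, 1.5}, {0.5, 1.5} };
    gpc_vertex_list c1; c1.num_vertices = 4; c1.vertex = sq1;
    gpc_vertex_list c2; c2.num_vertices = 4; c2.vertex = sq2;

    gpc_polygon subj; subj.num_contours = 0; subj.hole = NULL; subj.contour = NULL;
    gpc_polygon clip; clip.num_contours = 0; clip.hole = NULL; clip.contour = NULL;
    gpc_polygon result;

    gpc_add_contour(&subj, &c1, 0 /* not a hole */);
    gpc_add_contour(&clip, &c2, 0);

    // Intersection of the two squares: a 0.5 x 0.5 square.
    gpc_polygon_clip(GPC_INT, &subj, &clip, &result);
    gpc_write_polygon(stdout, 0, &result);

    gpc_free_polygon(&subj);
    gpc_free_polygon(&clip);
    gpc_free_polygon(&result);
    return 0;
}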

File: octomap-master/octovis/src/extern/QGLViewer/VRender/gpc.cpp

/*
This file is part of the VRender library.
Copyright (C) 2005 Cyril Soler ([email protected])
Version 1.0.0, released on June 27, 2005.
http://artis.imag.fr/Members/Cyril.Soler/VRender
VRender is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
VRender is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VRender; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/****************************************************************************
Copyright (C) 2002-2014 Gilles Debunne. All rights reserved.
This file is part of the QGLViewer library version 2.6.3.
http://www.libqglviewer.com - [email protected]
This file may be used under the terms of the GNU General Public License
versions 2.0 or 3.0 as published by the Free Software Foundation and
appearing in the LICENSE file included in the packaging of this file.
In addition, as a special exception, Gilles Debunne gives you certain
additional rights, described in the file GPL_EXCEPTION in this package.
libQGLViewer uses dual licensing. Commercial/proprietary software must
purchase a libQGLViewer Commercial License.
This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*****************************************************************************/
/*
===========================================================================
Project: Generic Polygon Clipper
A new algorithm for calculating the difference, intersection,
exclusive-or or union of arbitrary polygon sets.
File: gpc.c
Author: Alan Murta (email: [email protected])
Version: 2.32
Date: 17th December 2004
Copyright: (C) 1997-2004, Advanced Interfaces Group,
University of Manchester.
This software is free for non-commercial use. It may be copied,
modified, and redistributed provided that this copyright notice
is preserved on all copies. The intellectual property rights of
the algorithms used reside with the University of Manchester
Advanced Interfaces Group.
You may not use this software, in whole or in part, in support
of any commercial product without the express consent of the
author.
There is no warranty or other guarantee of fitness of this
software for any purpose. It is provided solely "as is".
===========================================================================
*/
/*
===========================================================================
Includes
===========================================================================
*/
#include <stdexcept>
#include "gpc.h"
#include <stdlib.h>
#include <float.h>
#include <math.h>
using namespace std ;
/*
===========================================================================
Constants
===========================================================================
*/
#ifndef TRUE
#define FALSE 0
#define TRUE 1
#endif
#define LEFT 0
#define RIGHT 1
#define ABOVE 0
#define BELOW 1
#define CLIP 0
#define SUBJ 1
#define INVERT_TRISTRIPS FALSE
/*
===========================================================================
Macros
===========================================================================
*/
#define EQ(a, b) (fabs((a) - (b)) <= GPC_EPSILON)
#define PREV_INDEX(i, n) ((i - 1 + n) % n)
#define NEXT_INDEX(i, n) ((i + 1 ) % n)
#define OPTIMAL(v, i, n) ((v[PREV_INDEX(i, n)].y != v[i].y) || \
(v[NEXT_INDEX(i, n)].y != v[i].y))
#define FWD_MIN(v, i, n) ((v[PREV_INDEX(i, n)].vertex.y >= v[i].vertex.y) \
&& (v[NEXT_INDEX(i, n)].vertex.y > v[i].vertex.y))
#define NOT_FMAX(v, i, n) (v[NEXT_INDEX(i, n)].vertex.y > v[i].vertex.y)
#define REV_MIN(v, i, n) ((v[PREV_INDEX(i, n)].vertex.y > v[i].vertex.y) \
&& (v[NEXT_INDEX(i, n)].vertex.y >= v[i].vertex.y))
#define NOT_RMAX(v, i, n) (v[PREV_INDEX(i, n)].vertex.y > v[i].vertex.y)
#define VERTEX(e,p,s,x,y) {add_vertex(&((e)->outp[(p)]->v[(s)]), x, y); \
(e)->outp[(p)]->active++;}
#define P_EDGE(d,e,p,i,j) {(d)= (e); \
do {(d)= (d)->prev;} while (!(d)->outp[(p)]); \
(i)= (d)->bot.x + (d)->dx * ((j)-(d)->bot.y);}
#define N_EDGE(d,e,p,i,j) {(d)= (e); \
do {(d)= (d)->next;} while (!(d)->outp[(p)]); \
(i)= (d)->bot.x + (d)->dx * ((j)-(d)->bot.y);}
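/* MALLOC allocates b bytes, printing the diagnostic string s and exiting
   on failure (and yielding NULL when b == 0); FREE releases p and resets
   it to NULL so that a repeated FREE is harmless. */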
#define MALLOC(p, b, s, t) {if ((b) > 0) { \
p= (t*)malloc(b); if (!(p)) { \
fprintf(stderr, "gpc malloc failure: %s\n", s); \
exit(0);}} else p= NULL;}
#define FREE(p) {if (p) {free(p); (p)= NULL;}}
/*
===========================================================================
Private Data Types
===========================================================================
*/
typedef enum /* Edge intersection classes */
{
NUL, /* Empty non-intersection */
EMX, /* External maximum */
ELI, /* External left intermediate */
TED, /* Top edge */
ERI, /* External right intermediate */
RED, /* Right edge */
IMM, /* Internal maximum and minimum */
IMN, /* Internal minimum */
EMN, /* External minimum */
EMM, /* External maximum and minimum */
LED, /* Left edge */
ILI, /* Internal left intermediate */
BED, /* Bottom edge */
IRI, /* Internal right intermediate */
IMX, /* Internal maximum */
FUL /* Full non-intersection */
} vertex_type;
typedef enum /* Horizontal edge states */
{
NH, /* No horizontal edge */
BH, /* Bottom horizontal edge */
TH /* Top horizontal edge */
} h_state;
typedef enum /* Edge bundle state */
{
UNBUNDLED, /* Isolated edge not within a bundle */
BUNDLE_HEAD, /* Bundle head node */
BUNDLE_TAIL /* Passive bundle tail node */
} bundle_state;
typedef struct v_shape /* Internal vertex list datatype */
{
double x; /* X coordinate component */
double y; /* Y coordinate component */
struct v_shape *next; /* Pointer to next vertex in list */
} vertex_node;
class polygon_node /* Internal contour / tristrip type */
{
public:
polygon_node(): next(0),proxy(0) { v[0]=0; v[1]=0; }
int active; /* Active flag / vertex count */
int hole; /* Hole / external contour flag */
vertex_node *v[2]; /* Left and right vertex list ptrs */
polygon_node *next; /* Pointer to next polygon contour */
polygon_node *proxy; /* Pointer to actual structure used */
} ;
class edge_node
{
public:
edge_node()
: prev(0),next(0),pred(0),succ(0),next_bound(0)
{
outp[0] = 0 ;
outp[1] = 0 ;
}
gpc_vertex vertex; /* Piggy-backed contour vertex data */
gpc_vertex bot; /* Edge lower (x, y) coordinate */
gpc_vertex top; /* Edge upper (x, y) coordinate */
double xb; /* Scanbeam bottom x coordinate */
double xt; /* Scanbeam top x coordinate */
double dx; /* Change in x for a unit y increase */
int type; /* Clip / subject edge flag */
int bundle[2][2]; /* Bundle edge flags */
int bside[2]; /* Bundle left / right indicators */
bundle_state bstate[2]; /* Edge bundle state */
polygon_node *outp[2]; /* Output polygon / tristrip pointer */
edge_node *prev; /* Previous edge in the AET */
edge_node *next; /* Next edge in the AET */
edge_node *pred; /* Edge connected at the lower end */
edge_node *succ; /* Edge connected at the upper end */
edge_node *next_bound; /* Pointer to next bound in LMT */
} ;
class lmt_node /* Local minima table */
{
public:
lmt_node() : first_bound(0),next(0) {}
double y; /* Y coordinate at local minimum */
edge_node *first_bound; /* Pointer to bound list */
lmt_node *next; /* Pointer to next local minimum */
} ;
typedef struct sbt_t_shape /* Scanbeam tree */
{
double y; /* Scanbeam node y value */
struct sbt_t_shape *less; /* Pointer to nodes with lower y */
struct sbt_t_shape *more; /* Pointer to nodes with higher y */
} sb_tree;
typedef struct it_shape /* Intersection table */
{
edge_node *ie[2]; /* Intersecting edge (bundle) pair */
gpc_vertex point; /* Point of intersection */
struct it_shape *next; /* The next intersection table node */
} it_node;
typedef struct st_shape /* Sorted edge table */
{
edge_node *edge; /* Pointer to AET edge */
double xb; /* Scanbeam bottom x coordinate */
double xt; /* Scanbeam top x coordinate */
double dx; /* Change in x for a unit y increase */
struct st_shape *prev; /* Previous edge in sorted list */
} st_node;
typedef struct bbox_shape /* Contour axis-aligned bounding box */
{
double xmin; /* Minimum x coordinate */
double ymin; /* Minimum y coordinate */
double xmax; /* Maximum x coordinate */
double ymax; /* Maximum y coordinate */
} bbox;
/*
===========================================================================
Global Data
===========================================================================
*/
/* Horizontal edge state transitions within scanbeam boundary */
const h_state next_h_state[3][6]=
{
/* ABOVE BELOW CROSS */
/* L R L R L R */
/* NH */ {BH, TH, TH, BH, NH, NH},
/* BH */ {NH, NH, NH, NH, TH, TH},
/* TH */ {NH, NH, NH, NH, BH, BH}
};
/*
===========================================================================
Private Functions
===========================================================================
*/
static void reset_it(it_node **it)
{
it_node *itn;
while (*it)
{
itn= (*it)->next;
FREE(*it);
*it= itn;
}
}
static void reset_lmt(lmt_node **lmt)
{
lmt_node *lmtn;
while (*lmt)
{
lmtn= (*lmt)->next;
FREE(*lmt);
*lmt= lmtn;
}
}
static void insert_bound(edge_node **b, edge_node *e)
{
edge_node *existing_bound;
if (!*b)
{
/* Link node e to the tail of the list */
*b= e;
}
else
{
/* Do primary sort on the x field */
if (e[0].bot.x < (*b)[0].bot.x)
{
/* Insert a new node mid-list */
existing_bound= *b;
*b= e;
(*b)->next_bound= existing_bound;
}
else
{
if (e[0].bot.x == (*b)[0].bot.x)
{
/* Do secondary sort on the dx field */
if (e[0].dx < (*b)[0].dx)
{
/* Insert a new node mid-list */
existing_bound= *b;
*b= e;
(*b)->next_bound= existing_bound;
}
else
{
/* Head further down the list */
insert_bound(&((*b)->next_bound), e);
}
}
else
{
/* Head further down the list */
insert_bound(&((*b)->next_bound), e);
}
}
}
}
static edge_node **bound_list(lmt_node **lmt, double y)
{
lmt_node *existing_node;
if (!*lmt)
{
/* Add node onto the tail end of the LMT */
MALLOC(*lmt, sizeof(lmt_node), "LMT insertion", lmt_node);
(*lmt)->y= y;
(*lmt)->first_bound= NULL;
(*lmt)->next= NULL;
return &((*lmt)->first_bound);
}
else
if (y < (*lmt)->y)
{
/* Insert a new LMT node before the current node */
existing_node= *lmt;
MALLOC(*lmt, sizeof(lmt_node), "LMT insertion", lmt_node);
(*lmt)->y= y;
(*lmt)->first_bound= NULL;
(*lmt)->next= existing_node;
return &((*lmt)->first_bound);
}
else
if (y > (*lmt)->y)
/* Head further up the LMT */
return bound_list(&((*lmt)->next), y);
else
/* Use this existing LMT node */
return &((*lmt)->first_bound);
}
static void add_to_sbtree(int *entries, sb_tree **sbtree, double y)
{
if (!*sbtree)
{
/* Add a new tree node here */
MALLOC(*sbtree, sizeof(sb_tree), "scanbeam tree insertion", sb_tree);
(*sbtree)->y= y;
(*sbtree)->less= NULL;
(*sbtree)->more= NULL;
(*entries)++;
}
else
{
if ((*sbtree)->y > y)
{
/* Head into the 'less' sub-tree */
add_to_sbtree(entries, &((*sbtree)->less), y);
}
else
{
if ((*sbtree)->y < y)
{
/* Head into the 'more' sub-tree */
add_to_sbtree(entries, &((*sbtree)->more), y);
}
}
}
}
static void build_sbt(int *entries, double *sbt, sb_tree *sbtree)
{
if (sbtree->less)
build_sbt(entries, sbt, sbtree->less);
sbt[*entries]= sbtree->y;
(*entries)++;
if (sbtree->more)
build_sbt(entries, sbt, sbtree->more);
}
static void free_sbtree(sb_tree **sbtree)
{
if (*sbtree)
{
free_sbtree(&((*sbtree)->less));
free_sbtree(&((*sbtree)->more));
FREE(*sbtree);
}
}
static int count_optimal_vertices(gpc_vertex_list c)
{
int result= 0;
/* Ignore non-contributing contours */
if (c.num_vertices > 0)
{
for (long i= 0; i < c.num_vertices; i++)
/* Ignore superfluous vertices embedded in horizontal edges */
if (OPTIMAL(c.vertex, i, c.num_vertices))
result++;
}
return result;
}
static edge_node *build_lmt(lmt_node **lmt, sb_tree **sbtree,
int *sbt_entries, gpc_polygon *p, int type,
gpc_op op)
{
int i, min, max, num_edges, v, num_vertices;
int total_vertices= 0, e_index=0;
edge_node *e, *edge_table;
for (size_t c= 0; c < p->num_contours; c++)
total_vertices+= count_optimal_vertices(p->contour[c]);
/* Create the entire input polygon edge table in one go */
MALLOC(edge_table, total_vertices * sizeof(edge_node), "edge table creation", edge_node);
for(int k=0;k<total_vertices;++k)
edge_table[k] = edge_node() ;
for (size_t c= 0; c < p->num_contours; c++)
{
if (p->contour[c].num_vertices < 0)
{
/* Ignore the non-contributing contour and repair the vertex count */
p->contour[c].num_vertices= -p->contour[c].num_vertices;
}
else
{
/* Perform contour optimisation */
num_vertices= 0;
for (i= 0; i < p->contour[c].num_vertices; i++)
if (OPTIMAL(p->contour[c].vertex, i, p->contour[c].num_vertices))
{
edge_table[num_vertices].vertex.x= p->contour[c].vertex[i].x;
edge_table[num_vertices].vertex.y= p->contour[c].vertex[i].y;
/* Record vertex in the scanbeam table */
add_to_sbtree(sbt_entries, sbtree,
edge_table[num_vertices].vertex.y);
num_vertices++;
}
/* Do the contour forward pass */
for (min= 0; min < num_vertices; min++)
{
/* If a forward local minimum... */
if (FWD_MIN(edge_table, min, num_vertices))
{
/* Search for the next local maximum... */
num_edges= 1;
max= NEXT_INDEX(min, num_vertices);
while (NOT_FMAX(edge_table, max, num_vertices))
{
num_edges++;
max= NEXT_INDEX(max, num_vertices);
}
/* Build the next edge list */
e= &edge_table[e_index];
e_index+= num_edges;
v= min;
e[0].bstate[BELOW]= UNBUNDLED;
e[0].bundle[BELOW][CLIP]= FALSE;
e[0].bundle[BELOW][SUBJ]= FALSE;
for (i= 0; i < num_edges; i++)
{
e[i].xb= edge_table[v].vertex.x;
e[i].bot.x= edge_table[v].vertex.x;
e[i].bot.y= edge_table[v].vertex.y;
v= NEXT_INDEX(v, num_vertices);
e[i].top.x= edge_table[v].vertex.x;
e[i].top.y= edge_table[v].vertex.y;
e[i].dx= (edge_table[v].vertex.x - e[i].bot.x) /
(e[i].top.y - e[i].bot.y);
e[i].type= type;
e[i].outp[ABOVE]= NULL;
e[i].outp[BELOW]= NULL;
e[i].next= NULL;
e[i].prev= NULL;
e[i].succ= ((num_edges > 1) && (i < (num_edges - 1))) ?
&(e[i + 1]) : NULL;
e[i].pred= ((num_edges > 1) && (i > 0)) ? &(e[i - 1]) : NULL;
e[i].next_bound= NULL;
e[i].bside[CLIP]= (op == GPC_DIFF) ? RIGHT : LEFT;
e[i].bside[SUBJ]= LEFT;
}
insert_bound(bound_list(lmt, edge_table[min].vertex.y), e);
}
}
/* Do the contour reverse pass */
for (min= 0; min < num_vertices; min++)
{
/* If a reverse local minimum... */
if (REV_MIN(edge_table, min, num_vertices))
{
/* Search for the previous local maximum... */
num_edges= 1;
max= PREV_INDEX(min, num_vertices);
while (NOT_RMAX(edge_table, max, num_vertices))
{
num_edges++;
max= PREV_INDEX(max, num_vertices);
}
/* Build the previous edge list */
e= &edge_table[e_index];
e_index+= num_edges;
v= min;
e[0].bstate[BELOW]= UNBUNDLED;
e[0].bundle[BELOW][CLIP]= FALSE;
e[0].bundle[BELOW][SUBJ]= FALSE;
for (i= 0; i < num_edges; i++)
{
e[i].xb= edge_table[v].vertex.x;
e[i].bot.x= edge_table[v].vertex.x;
e[i].bot.y= edge_table[v].vertex.y;
v= PREV_INDEX(v, num_vertices);
e[i].top.x= edge_table[v].vertex.x;
e[i].top.y= edge_table[v].vertex.y;
e[i].dx= (edge_table[v].vertex.x - e[i].bot.x) /
(e[i].top.y - e[i].bot.y);
e[i].type= type;
e[i].outp[ABOVE]= NULL;
e[i].outp[BELOW]= NULL;
e[i].next= NULL;
e[i].prev= NULL;
e[i].succ= ((num_edges > 1) && (i < (num_edges - 1))) ?
&(e[i + 1]) : NULL;
e[i].pred= ((num_edges > 1) && (i > 0)) ? &(e[i - 1]) : NULL;
e[i].next_bound= NULL;
e[i].bside[CLIP]= (op == GPC_DIFF) ? RIGHT : LEFT;
e[i].bside[SUBJ]= LEFT;
}
insert_bound(bound_list(lmt, edge_table[min].vertex.y), e);
}
}
}
}
return edge_table;
}
static void add_edge_to_aet(edge_node **aet, edge_node *edge, edge_node *prev)
{
if (!*aet)
{
/* Append edge onto the tail end of the AET */
*aet= edge;
edge->prev= prev;
edge->next= NULL;
}
else
{
/* Do primary sort on the xb field */
if (edge->xb < (*aet)->xb)
{
/* Insert edge here (before the AET edge) */
edge->prev= prev;
edge->next= *aet;
(*aet)->prev= edge;
*aet= edge;
}
else
{
if (edge->xb == (*aet)->xb)
{
/* Do secondary sort on the dx field */
if (edge->dx < (*aet)->dx)
{
/* Insert edge here (before the AET edge) */
edge->prev= prev;
edge->next= *aet;
(*aet)->prev= edge;
*aet= edge;
}
else
{
/* Head further into the AET */
add_edge_to_aet(&((*aet)->next), edge, *aet);
}
}
else
{
/* Head further into the AET */
add_edge_to_aet(&((*aet)->next), edge, *aet);
}
}
}
}
static void add_intersection(it_node **it, edge_node *edge0, edge_node *edge1,
double x, double y)
{
it_node *existing_node;
if (!*it)
{
/* Append a new node to the tail of the list */
MALLOC(*it, sizeof(it_node), "IT insertion", it_node);
(*it)->ie[0]= edge0;
(*it)->ie[1]= edge1;
(*it)->point.x= x;
(*it)->point.y= y;
(*it)->next= NULL;
}
else
{
if ((*it)->point.y > y)
{
/* Insert a new node mid-list */
existing_node= *it;
MALLOC(*it, sizeof(it_node), "IT insertion", it_node);
(*it)->ie[0]= edge0;
(*it)->ie[1]= edge1;
(*it)->point.x= x;
(*it)->point.y= y;
(*it)->next= existing_node;
}
else
/* Head further down the list */
add_intersection(&((*it)->next), edge0, edge1, x, y);
}
}
static void add_st_edge(st_node **st, it_node **it, edge_node *edge,
double dy)
{
st_node *existing_node;
double den, r, x, y;
if (!*st)
{
/* Append edge onto the tail end of the ST */
MALLOC(*st, sizeof(st_node), "ST insertion", st_node);
(*st)->edge= edge;
(*st)->xb= edge->xb;
(*st)->xt= edge->xt;
(*st)->dx= edge->dx;
(*st)->prev= NULL;
}
else
{
den= ((*st)->xt - (*st)->xb) - (edge->xt - edge->xb);
/* If new edge and ST edge don't cross */
if ((edge->xt >= (*st)->xt) || (edge->dx == (*st)->dx) ||
(fabs(den) <= DBL_EPSILON))
{
/* No intersection - insert edge here (before the ST edge) */
existing_node= *st;
MALLOC(*st, sizeof(st_node), "ST insertion", st_node);
(*st)->edge= edge;
(*st)->xb= edge->xb;
(*st)->xt= edge->xt;
(*st)->dx= edge->dx;
(*st)->prev= existing_node;
}
else
{
/* Compute intersection between new edge and ST edge */
r= (edge->xb - (*st)->xb) / den;
x= (*st)->xb + r * ((*st)->xt - (*st)->xb);
y= r * dy;
/* Insert the edge pointers and the intersection point in the IT */
add_intersection(it, (*st)->edge, edge, x, y);
/* Head further into the ST */
add_st_edge(&((*st)->prev), it, edge, dy);
}
}
}
static void build_intersection_table(it_node **it, edge_node *aet, double dy)
{
st_node *st, *stp;
edge_node *edge;
/* Build intersection table for the current scanbeam */
reset_it(it);
st= NULL;
/* Process each AET edge */
for (edge= aet; edge; edge= edge->next)
{
if ((edge->bstate[ABOVE] == BUNDLE_HEAD) ||
edge->bundle[ABOVE][CLIP] || edge->bundle[ABOVE][SUBJ])
add_st_edge(&st, it, edge, dy);
}
/* Free the sorted edge table */
while (st)
{
stp= st->prev;
FREE(st);
st= stp;
}
}
static int count_contours(polygon_node *polygon)
{
int nc, nv;
vertex_node *v, *nextv;
for (nc= 0; polygon; polygon= polygon->next)
if (polygon->active)
{
/* Count the vertices in the current contour */
nv= 0;
for (v= polygon->proxy->v[LEFT]; v; v= v->next)
nv++;
/* Record valid vertex counts in the active field */
if (nv > 2)
{
polygon->active= nv;
nc++;
}
else
{
/* Invalid contour: just free the heap */
for (v= polygon->proxy->v[LEFT]; v; v= nextv)
{
nextv= v->next;
FREE(v);
}
polygon->active= 0;
}
}
return nc;
}
static void add_left(polygon_node *p, double x, double y)
{
vertex_node *nv;
if(p == NULL) throw runtime_error("GPC: Something's wrong.") ;
/* Create a new vertex node and set its fields */
MALLOC(nv, sizeof(vertex_node), "vertex node creation", vertex_node);
nv->x= x;
nv->y= y;
/* Add vertex nv to the left end of the polygon's vertex list */
nv->next= p->proxy->v[LEFT];
/* Update proxy->[LEFT] to point to nv */
p->proxy->v[LEFT]= nv;
}
static void merge_left(polygon_node *p, polygon_node *q, polygon_node *list)
{
polygon_node *target;
if(p == NULL) throw runtime_error("GPC: Something's wrong.") ;
if(q == NULL) throw runtime_error("GPC: Something's wrong.") ;
/* Label contour as a hole */
q->proxy->hole= TRUE;
if (p->proxy != q->proxy)
{
/* Assign p's vertex list to the left end of q's list */
p->proxy->v[RIGHT]->next= q->proxy->v[LEFT];
q->proxy->v[LEFT]= p->proxy->v[LEFT];
/* Redirect any p->proxy references to q->proxy */
for (target= p->proxy; list; list= list->next)
{
if (list->proxy == target)
{
list->active= FALSE;
list->proxy= q->proxy;
}
}
}
}
static void add_right(polygon_node *p, double x, double y)
{
vertex_node *nv = 0;
if(p == NULL) throw runtime_error("GPC: Something's wrong.") ;
/* Create a new vertex node and set its fields */
MALLOC(nv, sizeof(vertex_node), "vertex node creation", vertex_node);
nv->x= x;
nv->y= y;
nv->next= NULL;
/* Add vertex nv to the right end of the polygon's vertex list */
p->proxy->v[RIGHT]->next= nv;
/* Update proxy->v[RIGHT] to point to nv */
p->proxy->v[RIGHT]= nv;
}
static void merge_right(polygon_node *p, polygon_node *q, polygon_node *list)
{
polygon_node *target = 0;
if(p == NULL) throw runtime_error("GPC: Something's wrong.") ;
if(q == NULL) throw runtime_error("GPC: Something's wrong.") ;
/* Label contour as external */
q->proxy->hole= FALSE;
if (p->proxy != q->proxy)
{
/* Assign p's vertex list to the right end of q's list */
q->proxy->v[RIGHT]->next= p->proxy->v[LEFT];
q->proxy->v[RIGHT]= p->proxy->v[RIGHT];
/* Redirect any p->proxy references to q->proxy */
for (target= p->proxy; list; list= list->next)
{
if (list->proxy == target)
{
list->active= FALSE;
list->proxy= q->proxy;
}
}
}
}
static void add_local_min(polygon_node **p, edge_node *edge,
double x, double y)
{
polygon_node *existing_min = 0;
vertex_node *nv;
existing_min= *p;
MALLOC(*p, sizeof(polygon_node), "polygon node creation", polygon_node);
**p = polygon_node() ;
/* Create a new vertex node and set its fields */
MALLOC(nv, sizeof(vertex_node), "vertex node creation", vertex_node);
*nv = vertex_node() ;
nv->x= x;
nv->y= y;
nv->next= NULL;
/* Initialise proxy to point to p itself */
(*p)->proxy= (*p);
(*p)->active= TRUE;
(*p)->next= existing_min;
/* Make v[LEFT] and v[RIGHT] point to new vertex nv */
(*p)->v[LEFT]= nv;
(*p)->v[RIGHT]= nv;
/* Assign polygon p to the edge */
edge->outp[ABOVE]= *p;
}
static int count_tristrips(polygon_node *tn)
{
int total;
for (total= 0; tn; tn= tn->next)
if (tn->active > 2)
total++;
return total;
}
static void add_vertex(vertex_node **t, double x, double y)
{
if (!(*t))
{
MALLOC(*t, sizeof(vertex_node), "tristrip vertex creation", vertex_node);
(*t)->x= x;
(*t)->y= y;
(*t)->next= NULL;
}
else
/* Head further down the list */
add_vertex(&((*t)->next), x, y);
}
static void new_tristrip(polygon_node **tn, edge_node *edge,
double x, double y)
{
if (!(*tn))
{
MALLOC(*tn, sizeof(polygon_node), "tristrip node creation", polygon_node);
**tn = polygon_node() ;
(*tn)->next= NULL;
(*tn)->v[LEFT]= NULL;
(*tn)->v[RIGHT]= NULL;
(*tn)->active= 1;
add_vertex(&((*tn)->v[LEFT]), x, y);
edge->outp[ABOVE]= *tn;
}
else
/* Head further down the list */
new_tristrip(&((*tn)->next), edge, x, y);
}
static bbox *create_contour_bboxes(gpc_polygon *p)
{
bbox *box;
int v;
MALLOC(box, p->num_contours * sizeof(bbox), "Bounding box creation", bbox);
/* Construct contour bounding boxes */
for (size_t c= 0; c < p->num_contours; c++)
{
/* Initialise bounding box extent */
box[c].xmin= DBL_MAX;
box[c].ymin= DBL_MAX;
box[c].xmax= -DBL_MAX;
box[c].ymax= -DBL_MAX;
for (v= 0; v < p->contour[c].num_vertices; v++)
{
/* Adjust bounding box */
if (p->contour[c].vertex[v].x < box[c].xmin)
box[c].xmin= p->contour[c].vertex[v].x;
if (p->contour[c].vertex[v].y < box[c].ymin)
box[c].ymin= p->contour[c].vertex[v].y;
if (p->contour[c].vertex[v].x > box[c].xmax)
box[c].xmax= p->contour[c].vertex[v].x;
if (p->contour[c].vertex[v].y > box[c].ymax)
box[c].ymax= p->contour[c].vertex[v].y;
}
}
return box;
}
static void minimax_test(gpc_polygon *subj, gpc_polygon *clip, gpc_op op)
{
bbox *s_bbox, *c_bbox;
int *o_table, overlap;
s_bbox= create_contour_bboxes(subj);
c_bbox= create_contour_bboxes(clip);
MALLOC(o_table, subj->num_contours * clip->num_contours * sizeof(int),
"overlap table creation", int);
/* Check all subject contour bounding boxes against clip boxes */
for (size_t s= 0; s < subj->num_contours; s++)
for (size_t c= 0; c < clip->num_contours; c++)
o_table[c * subj->num_contours + s]=
(!((s_bbox[s].xmax < c_bbox[c].xmin) ||
(s_bbox[s].xmin > c_bbox[c].xmax))) &&
(!((s_bbox[s].ymax < c_bbox[c].ymin) ||
(s_bbox[s].ymin > c_bbox[c].ymax)));
/* For each clip contour, search for any subject contour overlaps */
for (size_t c= 0; c < clip->num_contours; c++)
{
overlap= 0;
for (size_t s= 0; (!overlap) && (s < subj->num_contours); s++)
overlap= o_table[c * subj->num_contours + s];
if (!overlap)
/* Flag non contributing status by negating vertex count */
clip->contour[c].num_vertices = -clip->contour[c].num_vertices;
}
if (op == GPC_INT)
{
/* For each subject contour, search for any clip contour overlaps */
for (size_t s= 0; s < subj->num_contours; s++)
{
overlap= 0;
for (size_t c= 0; (!overlap) && (c < clip->num_contours); c++)
overlap= o_table[c * subj->num_contours + s];
if (!overlap)
/* Flag non contributing status by negating vertex count */
subj->contour[s].num_vertices = -subj->contour[s].num_vertices;
}
}
FREE(s_bbox);
FREE(c_bbox);
FREE(o_table);
}
/*
===========================================================================
Public Functions
===========================================================================
*/
void gpc_free_polygon(gpc_polygon *p)
{
for (size_t c= 0; c < p->num_contours; c++)
FREE(p->contour[c].vertex);
FREE(p->hole);
FREE(p->contour);
p->num_contours= 0;
}
/* Unused and fscanf creates compilation warnings
void gpc_read_polygon(FILE *fp, int read_hole_flags, gpc_polygon *p)
{
int c, v;
fscanf(fp, "%d", &(p->num_contours));
MALLOC(p->hole, p->num_contours * sizeof(int),
"hole flag array creation", int);
MALLOC(p->contour, p->num_contours
* sizeof(gpc_vertex_list), "contour creation", gpc_vertex_list);
for (c= 0; c < p->num_contours; c++)
{
fscanf(fp, "%d", &(p->contour[c].num_vertices));
if (read_hole_flags)
fscanf(fp, "%d", &(p->hole[c]));
else
p->hole[c]= FALSE; // Assume all contours to be external
MALLOC(p->contour[c].vertex, p->contour[c].num_vertices
* sizeof(gpc_vertex), "vertex creation", gpc_vertex);
for (v= 0; v < p->contour[c].num_vertices; v++)
fscanf(fp, "%lf %lf", &(p->contour[c].vertex[v].x),
&(p->contour[c].vertex[v].y));
}
}
*/
void gpc_write_polygon(FILE *fp, int write_hole_flags, gpc_polygon *p)
{
fprintf(fp, "%lu\n", p->num_contours);
for (size_t c= 0; c < p->num_contours; c++)
{
fprintf(fp, "%lu\n", p->contour[c].num_vertices);
if (write_hole_flags)
fprintf(fp, "%d\n", p->hole[c]);
for (long v= 0; v < p->contour[c].num_vertices; v++)
fprintf(fp, "% .*lf % .*lf\n",
DBL_DIG, p->contour[c].vertex[v].x,
DBL_DIG, p->contour[c].vertex[v].y);
}
}
void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole)
{
int *extended_hole;
size_t c;
gpc_vertex_list *extended_contour;
/* Create an extended hole array */
MALLOC(extended_hole, (p->num_contours + 1)
* sizeof(int), "contour hole addition", int);
/* Create an extended contour array */
MALLOC(extended_contour, (p->num_contours + 1)
* sizeof(gpc_vertex_list), "contour addition", gpc_vertex_list);
/* Copy the old contour and hole data into the extended arrays */
for (c= 0; c < p->num_contours; c++)
{
extended_hole[c]= p->hole[c];
extended_contour[c]= p->contour[c];
}
/* Copy the new contour and hole onto the end of the extended arrays */
c= p->num_contours;
extended_hole[c]= hole;
extended_contour[c].num_vertices= new_contour->num_vertices;
MALLOC(extended_contour[c].vertex, new_contour->num_vertices
* sizeof(gpc_vertex), "contour addition", gpc_vertex);
for (long v= 0; v < new_contour->num_vertices; v++)
extended_contour[c].vertex[v]= new_contour->vertex[v];
/* Dispose of the old contour */
FREE(p->contour);
FREE(p->hole);
/* Update the polygon information */
p->num_contours++;
p->hole= extended_hole;
p->contour= extended_contour;
}
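/* Core clipping entry point: builds the local minima table (LMT) and the
   scanbeam table for both polygons, then sweeps each scanbeam, bundling
   coincident edges and classifying every vertex to emit result contours.
   Passing subj or clip as result is allowed: the input polygon is freed
   before the result is written. */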
void gpc_polygon_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
gpc_polygon *result)
{
sb_tree *sbtree= NULL;
it_node *it= NULL, *intersect=0;
edge_node *edge=0, *prev_edge=0, *next_edge=0, *succ_edge=0, *e0=0, *e1=0;
edge_node *aet= NULL, *c_heap= NULL, *s_heap= NULL;
lmt_node *lmt= NULL, *local_min=0;
polygon_node *out_poly= NULL, *p=0, *q=0, *poly=0, *npoly=0, *cf= NULL;
vertex_node *vtx=0, *nv=0;
h_state horiz[2];
int in[2], exists[2], parity[2]= {LEFT, LEFT};
int c, v, contributing=0, search, scanbeam= 0, sbt_entries= 0;
int vclass=0, bl=0, br=0, tl=0, tr=0;
double *sbt= NULL, xb, px, yb, yt=0.0, dy=0.0, ix, iy;
/* Test for trivial NULL result cases */
if (((subj->num_contours == 0) && (clip->num_contours == 0))
|| ((subj->num_contours == 0) && ((op == GPC_INT) || (op == GPC_DIFF)))
|| ((clip->num_contours == 0) && (op == GPC_INT)))
{
result->num_contours= 0;
result->hole= NULL;
result->contour= NULL;
return;
}
/* Identify potentially contributing contours */
if (((op == GPC_INT) || (op == GPC_DIFF))
&& (subj->num_contours > 0) && (clip->num_contours > 0))
minimax_test(subj, clip, op);
/* Build LMT */
if (subj->num_contours > 0)
s_heap= build_lmt(&lmt, &sbtree, &sbt_entries, subj, SUBJ, op);
if (clip->num_contours > 0)
c_heap= build_lmt(&lmt, &sbtree, &sbt_entries, clip, CLIP, op);
/* Return a NULL result if no contours contribute */
if (lmt == NULL)
{
result->num_contours= 0;
result->hole= NULL;
result->contour= NULL;
reset_lmt(&lmt);
FREE(s_heap);
FREE(c_heap);
return;
}
/* Build scanbeam table from scanbeam tree */
MALLOC(sbt, sbt_entries * sizeof(double), "sbt creation", double);
build_sbt(&scanbeam, sbt, sbtree);
scanbeam= 0;
free_sbtree(&sbtree);
/* Allow pointer re-use without causing memory leak */
if (subj == result)
gpc_free_polygon(subj);
if (clip == result)
gpc_free_polygon(clip);
/* Invert clip polygon for difference operation */
if (op == GPC_DIFF)
parity[CLIP]= RIGHT;
local_min= lmt;
/* Process each scanbeam */
while (scanbeam < sbt_entries)
{
/* Set yb and yt to the bottom and top of the scanbeam */
yb= sbt[scanbeam++];
if (scanbeam < sbt_entries)
{
yt= sbt[scanbeam];
dy= yt - yb;
}
/* === SCANBEAM BOUNDARY PROCESSING ================================ */
/* If LMT node corresponding to yb exists */
if (local_min)
{
if (local_min->y == yb)
{
/* Add edges starting at this local minimum to the AET */
for (edge= local_min->first_bound; edge; edge= edge->next_bound)
add_edge_to_aet(&aet, edge, NULL);
local_min= local_min->next;
}
}
/* Set dummy previous x value */
px= -DBL_MAX;
/* Create bundles within AET */
e0= aet;
e1= aet;
/* Set up bundle fields of first edge */
aet->bundle[ABOVE][ aet->type]= (aet->top.y != yb);
aet->bundle[ABOVE][!aet->type]= FALSE;
aet->bstate[ABOVE]= UNBUNDLED;
for (next_edge= aet->next; next_edge; next_edge= next_edge->next)
{
/* Set up bundle fields of next edge */
next_edge->bundle[ABOVE][ next_edge->type]= (next_edge->top.y != yb);
next_edge->bundle[ABOVE][!next_edge->type]= FALSE;
next_edge->bstate[ABOVE]= UNBUNDLED;
/* Bundle edges above the scanbeam boundary if they coincide */
if (next_edge->bundle[ABOVE][next_edge->type])
{
if (EQ(e0->xb, next_edge->xb) && EQ(e0->dx, next_edge->dx)
&& (e0->top.y != yb))
{
next_edge->bundle[ABOVE][ next_edge->type]^=
e0->bundle[ABOVE][ next_edge->type];
next_edge->bundle[ABOVE][!next_edge->type]=
e0->bundle[ABOVE][!next_edge->type];
next_edge->bstate[ABOVE]= BUNDLE_HEAD;
e0->bundle[ABOVE][CLIP]= FALSE;
e0->bundle[ABOVE][SUBJ]= FALSE;
e0->bstate[ABOVE]= BUNDLE_TAIL;
}
e0= next_edge;
}
}
horiz[CLIP]= NH;
horiz[SUBJ]= NH;
/* Process each edge at this scanbeam boundary */
for (edge= aet; edge; edge= edge->next)
{
exists[CLIP]= edge->bundle[ABOVE][CLIP] +
(edge->bundle[BELOW][CLIP] << 1);
exists[SUBJ]= edge->bundle[ABOVE][SUBJ] +
(edge->bundle[BELOW][SUBJ] << 1);
if (exists[CLIP] || exists[SUBJ])
{
/* Set bundle side */
edge->bside[CLIP]= parity[CLIP];
edge->bside[SUBJ]= parity[SUBJ];
/* Determine contributing status and quadrant occupancies */
switch (op)
{
case GPC_DIFF:
case GPC_INT:
contributing= (exists[CLIP] && (parity[SUBJ] || horiz[SUBJ]))
|| (exists[SUBJ] && (parity[CLIP] || horiz[CLIP]))
|| (exists[CLIP] && exists[SUBJ]
&& (parity[CLIP] == parity[SUBJ]));
br= (parity[CLIP])
&& (parity[SUBJ]);
bl= (parity[CLIP] ^ edge->bundle[ABOVE][CLIP])
&& (parity[SUBJ] ^ edge->bundle[ABOVE][SUBJ]);
tr= (parity[CLIP] ^ (horiz[CLIP]!=NH))
&& (parity[SUBJ] ^ (horiz[SUBJ]!=NH));
tl= (parity[CLIP] ^ (horiz[CLIP]!=NH) ^ edge->bundle[BELOW][CLIP])
&& (parity[SUBJ] ^ (horiz[SUBJ]!=NH) ^ edge->bundle[BELOW][SUBJ]);
break;
case GPC_XOR:
contributing= exists[CLIP] || exists[SUBJ];
br= (parity[CLIP])
^ (parity[SUBJ]);
bl= (parity[CLIP] ^ edge->bundle[ABOVE][CLIP])
^ (parity[SUBJ] ^ edge->bundle[ABOVE][SUBJ]);
tr= (parity[CLIP] ^ (horiz[CLIP]!=NH))
^ (parity[SUBJ] ^ (horiz[SUBJ]!=NH));
tl= (parity[CLIP] ^ (horiz[CLIP]!=NH) ^ edge->bundle[BELOW][CLIP])
^ (parity[SUBJ] ^ (horiz[SUBJ]!=NH) ^ edge->bundle[BELOW][SUBJ]);
break;
case GPC_UNION:
contributing= (exists[CLIP] && (!parity[SUBJ] || horiz[SUBJ]))
|| (exists[SUBJ] && (!parity[CLIP] || horiz[CLIP]))
|| (exists[CLIP] && exists[SUBJ]
&& (parity[CLIP] == parity[SUBJ]));
br= (parity[CLIP])
|| (parity[SUBJ]);
bl= (parity[CLIP] ^ edge->bundle[ABOVE][CLIP])
|| (parity[SUBJ] ^ edge->bundle[ABOVE][SUBJ]);
tr= (parity[CLIP] ^ (horiz[CLIP]!=NH))
|| (parity[SUBJ] ^ (horiz[SUBJ]!=NH));
tl= (parity[CLIP] ^ (horiz[CLIP]!=NH) ^ edge->bundle[BELOW][CLIP])
|| (parity[SUBJ] ^ (horiz[SUBJ]!=NH) ^ edge->bundle[BELOW][SUBJ]);
break;
}
/* Update parity */
parity[CLIP]^= edge->bundle[ABOVE][CLIP];
parity[SUBJ]^= edge->bundle[ABOVE][SUBJ];
/* Update horizontal state */
if (exists[CLIP])
horiz[CLIP]=
next_h_state[horiz[CLIP]]
[((exists[CLIP] - 1) << 1) + parity[CLIP]];
if (exists[SUBJ])
horiz[SUBJ]=
next_h_state[horiz[SUBJ]]
[((exists[SUBJ] - 1) << 1) + parity[SUBJ]];
vclass= tr + (tl << 1) + (br << 2) + (bl << 3);
if (contributing)
{
xb= edge->xb;
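/* The vertex class built above from the quadrant occupancy bits
(tr, tl, br, bl) selects a vertex type: E/I = external/internal,
MN/MX/MM = minimum/maximum/both, LI/RI = left/right intermediate,
LED/RED = left/right edge. */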
switch (vclass)
{
case EMN:
case IMN:
add_local_min(&out_poly, edge, xb, yb);
px= xb;
cf= edge->outp[ABOVE];
break;
case ERI:
if (xb != px)
{
add_right(cf, xb, yb);
px= xb;
}
edge->outp[ABOVE]= cf;
cf= NULL;
break;
case ELI:
add_left(edge->outp[BELOW], xb, yb);
px= xb;
cf= edge->outp[BELOW];
break;
case EMX:
if (xb != px)
{
add_left(cf, xb, yb);
px= xb;
}
merge_right(cf, edge->outp[BELOW], out_poly);
cf= NULL;
break;
case ILI:
if (xb != px)
{
add_left(cf, xb, yb);
px= xb;
}
edge->outp[ABOVE]= cf;
cf= NULL;
break;
case IRI:
add_right(edge->outp[BELOW], xb, yb);
px= xb;
cf= edge->outp[BELOW];
edge->outp[BELOW]= NULL;
break;
case IMX:
if (xb != px)
{
add_right(cf, xb, yb);
px= xb;
}
merge_left(cf, edge->outp[BELOW], out_poly);
cf= NULL;
edge->outp[BELOW]= NULL;
break;
case IMM:
if (xb != px)
{
add_right(cf, xb, yb);
px= xb;
}
merge_left(cf, edge->outp[BELOW], out_poly);
edge->outp[BELOW]= NULL;
add_local_min(&out_poly, edge, xb, yb);
cf= edge->outp[ABOVE];
break;
case EMM:
if (xb != px)
{
add_left(cf, xb, yb);
px= xb;
}
merge_right(cf, edge->outp[BELOW], out_poly);
edge->outp[BELOW]= NULL;
add_local_min(&out_poly, edge, xb, yb);
cf= edge->outp[ABOVE];
break;
case LED:
if (edge->bot.y == yb)
add_left(edge->outp[BELOW], xb, yb);
edge->outp[ABOVE]= edge->outp[BELOW];
px= xb;
break;
case RED:
if (edge->bot.y == yb)
add_right(edge->outp[BELOW], xb, yb);
edge->outp[ABOVE]= edge->outp[BELOW];
px= xb;
break;
default:
break;
} /* End of switch */
} /* End of contributing conditional */
} /* End of edge exists conditional */
} /* End of AET loop */
/* Delete terminating edges from the AET, otherwise compute xt */
for (edge= aet; edge; edge= edge->next)
{
if (edge->top.y == yb)
{
prev_edge= edge->prev;
next_edge= edge->next;
if (prev_edge)
prev_edge->next= next_edge;
else
aet= next_edge;
if (next_edge)
next_edge->prev= prev_edge;
/* Copy bundle head state to the adjacent tail edge if required */
if ((edge->bstate[BELOW] == BUNDLE_HEAD) && prev_edge)
{
if (prev_edge->bstate[BELOW] == BUNDLE_TAIL)
{
prev_edge->outp[BELOW]= edge->outp[BELOW];
prev_edge->bstate[BELOW]= UNBUNDLED;
if (prev_edge->prev)
if (prev_edge->prev->bstate[BELOW] == BUNDLE_TAIL)
prev_edge->bstate[BELOW]= BUNDLE_HEAD;
}
}
}
else
{
if (edge->top.y == yt)
edge->xt= edge->top.x;
else
edge->xt= edge->bot.x + edge->dx * (yt - edge->bot.y);
}
}
if (scanbeam < sbt_entries)
{
/* === SCANBEAM INTERIOR PROCESSING ============================== */
build_intersection_table(&it, aet, dy);
/* Process each node in the intersection table */
for (intersect= it; intersect; intersect= intersect->next)
{
e0= intersect->ie[0];
e1= intersect->ie[1];
/* Only generate output for contributing intersections */
if ((e0->bundle[ABOVE][CLIP] || e0->bundle[ABOVE][SUBJ])
&& (e1->bundle[ABOVE][CLIP] || e1->bundle[ABOVE][SUBJ]))
{
p= e0->outp[ABOVE];
q= e1->outp[ABOVE];
ix= intersect->point.x;
iy= intersect->point.y + yb;
in[CLIP]= ( e0->bundle[ABOVE][CLIP] && !e0->bside[CLIP])
|| ( e1->bundle[ABOVE][CLIP] && e1->bside[CLIP])
|| (!e0->bundle[ABOVE][CLIP] && !e1->bundle[ABOVE][CLIP]
&& e0->bside[CLIP] && e1->bside[CLIP]);
in[SUBJ]= ( e0->bundle[ABOVE][SUBJ] && !e0->bside[SUBJ])
|| ( e1->bundle[ABOVE][SUBJ] && e1->bside[SUBJ])
|| (!e0->bundle[ABOVE][SUBJ] && !e1->bundle[ABOVE][SUBJ]
&& e0->bside[SUBJ] && e1->bside[SUBJ]);
/* Determine quadrant occupancies */
switch (op)
{
case GPC_DIFF:
case GPC_INT:
tr= (in[CLIP])
&& (in[SUBJ]);
tl= (in[CLIP] ^ e1->bundle[ABOVE][CLIP])
&& (in[SUBJ] ^ e1->bundle[ABOVE][SUBJ]);
br= (in[CLIP] ^ e0->bundle[ABOVE][CLIP])
&& (in[SUBJ] ^ e0->bundle[ABOVE][SUBJ]);
bl= (in[CLIP] ^ e1->bundle[ABOVE][CLIP] ^ e0->bundle[ABOVE][CLIP])
&& (in[SUBJ] ^ e1->bundle[ABOVE][SUBJ] ^ e0->bundle[ABOVE][SUBJ]);
break;
case GPC_XOR:
tr= (in[CLIP])
^ (in[SUBJ]);
tl= (in[CLIP] ^ e1->bundle[ABOVE][CLIP])
^ (in[SUBJ] ^ e1->bundle[ABOVE][SUBJ]);
br= (in[CLIP] ^ e0->bundle[ABOVE][CLIP])
^ (in[SUBJ] ^ e0->bundle[ABOVE][SUBJ]);
bl= (in[CLIP] ^ e1->bundle[ABOVE][CLIP] ^ e0->bundle[ABOVE][CLIP])
^ (in[SUBJ] ^ e1->bundle[ABOVE][SUBJ] ^ e0->bundle[ABOVE][SUBJ]);
break;
case GPC_UNION:
tr= (in[CLIP])
|| (in[SUBJ]);
tl= (in[CLIP] ^ e1->bundle[ABOVE][CLIP])
|| (in[SUBJ] ^ e1->bundle[ABOVE][SUBJ]);
br= (in[CLIP] ^ e0->bundle[ABOVE][CLIP])
|| (in[SUBJ] ^ e0->bundle[ABOVE][SUBJ]);
bl= (in[CLIP] ^ e1->bundle[ABOVE][CLIP] ^ e0->bundle[ABOVE][CLIP])
|| (in[SUBJ] ^ e1->bundle[ABOVE][SUBJ] ^ e0->bundle[ABOVE][SUBJ]);
break;
}
vclass= tr + (tl << 1) + (br << 2) + (bl << 3);
switch (vclass)
{
case EMN:
add_local_min(&out_poly, e0, ix, iy);
e1->outp[ABOVE]= e0->outp[ABOVE];
break;
case ERI:
if (p)
{
add_right(p, ix, iy);
e1->outp[ABOVE]= p;
e0->outp[ABOVE]= NULL;
}
break;
case ELI:
if (q)
{
add_left(q, ix, iy);
e0->outp[ABOVE]= q;
e1->outp[ABOVE]= NULL;
}
break;
case EMX:
if (p && q)
{
add_left(p, ix, iy);
merge_right(p, q, out_poly);
e0->outp[ABOVE]= NULL;
e1->outp[ABOVE]= NULL;
}
break;
case IMN:
add_local_min(&out_poly, e0, ix, iy);
e1->outp[ABOVE]= e0->outp[ABOVE];
break;
case ILI:
if (p)
{
add_left(p, ix, iy);
e1->outp[ABOVE]= p;
e0->outp[ABOVE]= NULL;
}
break;
case IRI:
if (q)
{
add_right(q, ix, iy);
e0->outp[ABOVE]= q;
e1->outp[ABOVE]= NULL;
}
break;
case IMX:
if (p && q)
{
add_right(p, ix, iy);
merge_left(p, q, out_poly);
e0->outp[ABOVE]= NULL;
e1->outp[ABOVE]= NULL;
}
break;
case IMM:
if (p && q)
{
add_right(p, ix, iy);
merge_left(p, q, out_poly);
add_local_min(&out_poly, e0, ix, iy);
e1->outp[ABOVE]= e0->outp[ABOVE];
}
break;
case EMM:
if (p && q)
{
add_left(p, ix, iy);
merge_right(p, q, out_poly);
add_local_min(&out_poly, e0, ix, iy);
e1->outp[ABOVE]= e0->outp[ABOVE];
}
break;
default:
break;
} /* End of switch */
} /* End of contributing intersection conditional */
/* Swap bundle sides in response to edge crossing */
if (e0->bundle[ABOVE][CLIP])
e1->bside[CLIP]= !e1->bside[CLIP];
if (e1->bundle[ABOVE][CLIP])
e0->bside[CLIP]= !e0->bside[CLIP];
if (e0->bundle[ABOVE][SUBJ])
e1->bside[SUBJ]= !e1->bside[SUBJ];
if (e1->bundle[ABOVE][SUBJ])
e0->bside[SUBJ]= !e0->bside[SUBJ];
/* Swap e0 and e1 bundles in the AET */
prev_edge= e0->prev;
next_edge= e1->next;
if (next_edge)
next_edge->prev= e0;
if (e0->bstate[ABOVE] == BUNDLE_HEAD)
{
search= TRUE;
while (search)
{
prev_edge= prev_edge->prev;
if (prev_edge)
{
if (prev_edge->bstate[ABOVE] != BUNDLE_TAIL)
search= FALSE;
}
else
search= FALSE;
}
}
if (!prev_edge)
{
aet->prev= e1;
e1->next= aet;
aet= e0->next;
}
else
{
prev_edge->next->prev= e1;
e1->next= prev_edge->next;
prev_edge->next= e0->next;
}
if(e0->next == NULL) throw std::runtime_error("GPC internal error.");
if(e1->next == NULL) throw std::runtime_error("GPC internal error.");
e0->next->prev= prev_edge;
e1->next->prev= e1;
e0->next= next_edge;
} /* End of IT loop */
/* Prepare for next scanbeam */
for (edge= aet; edge; edge= next_edge)
{
next_edge= edge->next;
succ_edge= edge->succ;
if ((edge->top.y == yt) && succ_edge)
{
/* Replace AET edge by its successor */
succ_edge->outp[BELOW]= edge->outp[ABOVE];
succ_edge->bstate[BELOW]= edge->bstate[ABOVE];
succ_edge->bundle[BELOW][CLIP]= edge->bundle[ABOVE][CLIP];
succ_edge->bundle[BELOW][SUBJ]= edge->bundle[ABOVE][SUBJ];
prev_edge= edge->prev;
if (prev_edge)
prev_edge->next= succ_edge;
else
aet= succ_edge;
if (next_edge)
next_edge->prev= succ_edge;
succ_edge->prev= prev_edge;
succ_edge->next= next_edge;
}
else
{
/* Update this edge */
edge->outp[BELOW]= edge->outp[ABOVE];
edge->bstate[BELOW]= edge->bstate[ABOVE];
edge->bundle[BELOW][CLIP]= edge->bundle[ABOVE][CLIP];
edge->bundle[BELOW][SUBJ]= edge->bundle[ABOVE][SUBJ];
edge->xb= edge->xt;
}
edge->outp[ABOVE]= NULL;
}
}
} /* === END OF SCANBEAM PROCESSING ================================== */
/* Generate result polygon from out_poly */
result->contour= NULL;
result->hole= NULL;
result->num_contours= count_contours(out_poly);
if (result->num_contours > 0)
{
MALLOC(result->hole, result->num_contours
* sizeof(int), "hole flag table creation", int);
MALLOC(result->contour, result->num_contours
* sizeof(gpc_vertex_list), "contour creation", gpc_vertex_list);
c= 0;
for (poly= out_poly; poly; poly= npoly)
{
npoly= poly->next;
if (poly->active)
{
result->hole[c]= poly->proxy->hole;
result->contour[c].num_vertices= poly->active;
MALLOC(result->contour[c].vertex,
result->contour[c].num_vertices * sizeof(gpc_vertex),
"vertex creation", gpc_vertex);
v= result->contour[c].num_vertices - 1;
for (vtx= poly->proxy->v[LEFT]; vtx; vtx= nv)
{
nv= vtx->next;
result->contour[c].vertex[v].x= vtx->x;
result->contour[c].vertex[v].y= vtx->y;
FREE(vtx);
v--;
}
c++;
}
FREE(poly);
}
}
else
{
for (poly= out_poly; poly; poly= npoly)
{
npoly= poly->next;
FREE(poly);
}
}
/* Tidy up */
reset_it(&it);
reset_lmt(&lmt);
FREE(c_heap);
FREE(s_heap);
FREE(sbt);
}
void gpc_free_tristrip(gpc_tristrip *t)
{
for (size_t s= 0; s < t->num_strips; s++)
FREE(t->strip[s].vertex);
FREE(t->strip);
t->num_strips= 0;
}
void gpc_polygon_to_tristrip(gpc_polygon *s, gpc_tristrip *t)
{
gpc_polygon c;
c.num_contours= 0;
c.hole= NULL;
c.contour= NULL;
gpc_tristrip_clip(GPC_DIFF, s, &c, t);
}
void gpc_tristrip_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
gpc_tristrip *result)
{
sb_tree *sbtree= NULL;
it_node *it= NULL, *intersect;
edge_node *edge=0, *prev_edge=0, *next_edge=0, *succ_edge=0, *e0=0, *e1=0;
edge_node *aet= NULL, *c_heap= NULL, *s_heap= NULL, *cf=0;
lmt_node *lmt= NULL, *local_min;
polygon_node *tlist= NULL, *tn, *tnn, *p, *q;
vertex_node *lt, *ltn, *rt, *rtn;
h_state horiz[2];
vertex_type cft = NUL;
int in[2], exists[2], parity[2]= {LEFT, LEFT};
int s, v, contributing=0, search, scanbeam= 0, sbt_entries= 0;
int vclass=0, bl=0, br=0, tl=0, tr=0;
double *sbt= NULL, xb, px, nx, yb, yt=0.0, dy=0.0, ix, iy;
/* Test for trivial NULL result cases */
if (((subj->num_contours == 0) && (clip->num_contours == 0))
|| ((subj->num_contours == 0) && ((op == GPC_INT) || (op == GPC_DIFF)))
|| ((clip->num_contours == 0) && (op == GPC_INT)))
{
result->num_strips= 0;
result->strip= NULL;
return;
}
/* Identify potentially contributing contours */
if (((op == GPC_INT) || (op == GPC_DIFF))
&& (subj->num_contours > 0) && (clip->num_contours > 0))
minimax_test(subj, clip, op);
/* Build LMT */
if (subj->num_contours > 0)
s_heap= build_lmt(&lmt, &sbtree, &sbt_entries, subj, SUBJ, op);
if (clip->num_contours > 0)
c_heap= build_lmt(&lmt, &sbtree, &sbt_entries, clip, CLIP, op);
/* Return a NULL result if no contours contribute */
if (lmt == NULL)
{
result->num_strips= 0;
result->strip= NULL;
reset_lmt(&lmt);
FREE(s_heap);
FREE(c_heap);
return;
}
/* Build scanbeam table from scanbeam tree */
MALLOC(sbt, sbt_entries * sizeof(double), "sbt creation", double);
build_sbt(&scanbeam, sbt, sbtree);
scanbeam= 0;
free_sbtree(&sbtree);
/* Invert clip polygon for difference operation */
if (op == GPC_DIFF)
parity[CLIP]= RIGHT;
local_min= lmt;
/* Process each scanbeam */
while (scanbeam < sbt_entries)
{
/* Set yb and yt to the bottom and top of the scanbeam */
yb= sbt[scanbeam++];
if (scanbeam < sbt_entries)
{
yt= sbt[scanbeam];
dy= yt - yb;
}
/* === SCANBEAM BOUNDARY PROCESSING ================================ */
/* If LMT node corresponding to yb exists */
if (local_min)
{
if (local_min->y == yb)
{
/* Add edges starting at this local minimum to the AET */
for (edge= local_min->first_bound; edge; edge= edge->next_bound)
add_edge_to_aet(&aet, edge, NULL);
local_min= local_min->next;
}
}
/* Set dummy previous x value */
px= -DBL_MAX;
/* Create bundles within AET */
e0= aet;
e1= aet;
/* Set up bundle fields of first edge */
aet->bundle[ABOVE][ aet->type]= (aet->top.y != yb);
aet->bundle[ABOVE][!aet->type]= FALSE;
aet->bstate[ABOVE]= UNBUNDLED;
for (next_edge= aet->next; next_edge; next_edge= next_edge->next)
{
/* Set up bundle fields of next edge */
next_edge->bundle[ABOVE][ next_edge->type]= (next_edge->top.y != yb);
next_edge->bundle[ABOVE][!next_edge->type]= FALSE;
next_edge->bstate[ABOVE]= UNBUNDLED;
/* Bundle edges above the scanbeam boundary if they coincide */
if (next_edge->bundle[ABOVE][next_edge->type])
{
if (EQ(e0->xb, next_edge->xb) && EQ(e0->dx, next_edge->dx)
&& (e0->top.y != yb))
{
next_edge->bundle[ABOVE][ next_edge->type]^=
e0->bundle[ABOVE][ next_edge->type];
next_edge->bundle[ABOVE][!next_edge->type]=
e0->bundle[ABOVE][!next_edge->type];
next_edge->bstate[ABOVE]= BUNDLE_HEAD;
e0->bundle[ABOVE][CLIP]= FALSE;
e0->bundle[ABOVE][SUBJ]= FALSE;
e0->bstate[ABOVE]= BUNDLE_TAIL;
}
e0= next_edge;
}
}
horiz[CLIP]= NH;
horiz[SUBJ]= NH;
/* Process each edge at this scanbeam boundary */
for (edge= aet; edge; edge= edge->next)
{
exists[CLIP]= edge->bundle[ABOVE][CLIP] +
(edge->bundle[BELOW][CLIP] << 1);
exists[SUBJ]= edge->bundle[ABOVE][SUBJ] +
(edge->bundle[BELOW][SUBJ] << 1);
if (exists[CLIP] || exists[SUBJ])
{
/* Set bundle side */
edge->bside[CLIP]= parity[CLIP];
edge->bside[SUBJ]= parity[SUBJ];
/* Determine contributing status and quadrant occupancies */
switch (op)
{
case GPC_DIFF:
case GPC_INT:
contributing= (exists[CLIP] && (parity[SUBJ] || horiz[SUBJ]))
|| (exists[SUBJ] && (parity[CLIP] || horiz[CLIP]))
|| (exists[CLIP] && exists[SUBJ]
&& (parity[CLIP] == parity[SUBJ]));
br= (parity[CLIP])
&& (parity[SUBJ]);
bl= (parity[CLIP] ^ edge->bundle[ABOVE][CLIP])
&& (parity[SUBJ] ^ edge->bundle[ABOVE][SUBJ]);
tr= (parity[CLIP] ^ (horiz[CLIP]!=NH))
&& (parity[SUBJ] ^ (horiz[SUBJ]!=NH));
tl= (parity[CLIP] ^ (horiz[CLIP]!=NH) ^ edge->bundle[BELOW][CLIP])
&& (parity[SUBJ] ^ (horiz[SUBJ]!=NH) ^ edge->bundle[BELOW][SUBJ]);
break;
case GPC_XOR:
contributing= exists[CLIP] || exists[SUBJ];
br= (parity[CLIP])
^ (parity[SUBJ]);
bl= (parity[CLIP] ^ edge->bundle[ABOVE][CLIP])
^ (parity[SUBJ] ^ edge->bundle[ABOVE][SUBJ]);
tr= (parity[CLIP] ^ (horiz[CLIP]!=NH))
^ (parity[SUBJ] ^ (horiz[SUBJ]!=NH));
tl= (parity[CLIP] ^ (horiz[CLIP]!=NH) ^ edge->bundle[BELOW][CLIP])
^ (parity[SUBJ] ^ (horiz[SUBJ]!=NH) ^ edge->bundle[BELOW][SUBJ]);
break;
case GPC_UNION:
contributing= (exists[CLIP] && (!parity[SUBJ] || horiz[SUBJ]))
|| (exists[SUBJ] && (!parity[CLIP] || horiz[CLIP]))
|| (exists[CLIP] && exists[SUBJ]
&& (parity[CLIP] == parity[SUBJ]));
br= (parity[CLIP])
|| (parity[SUBJ]);
bl= (parity[CLIP] ^ edge->bundle[ABOVE][CLIP])
|| (parity[SUBJ] ^ edge->bundle[ABOVE][SUBJ]);
tr= (parity[CLIP] ^ (horiz[CLIP]!=NH))
|| (parity[SUBJ] ^ (horiz[SUBJ]!=NH));
tl= (parity[CLIP] ^ (horiz[CLIP]!=NH) ^ edge->bundle[BELOW][CLIP])
|| (parity[SUBJ] ^ (horiz[SUBJ]!=NH) ^ edge->bundle[BELOW][SUBJ]);
break;
}
/* Update parity */
parity[CLIP]^= edge->bundle[ABOVE][CLIP];
parity[SUBJ]^= edge->bundle[ABOVE][SUBJ];
/* Update horizontal state */
if (exists[CLIP])
horiz[CLIP]=
next_h_state[horiz[CLIP]]
[((exists[CLIP] - 1) << 1) + parity[CLIP]];
if (exists[SUBJ])
horiz[SUBJ]=
next_h_state[horiz[SUBJ]]
[((exists[SUBJ] - 1) << 1) + parity[SUBJ]];
vclass= tr + (tl << 1) + (br << 2) + (bl << 3);
if (contributing)
{
xb= edge->xb;
switch (vclass)
{
case EMN:
new_tristrip(&tlist, edge, xb, yb);
cf= edge;
break;
case ERI:
edge->outp[ABOVE]= cf->outp[ABOVE];
if (xb != cf->xb)
VERTEX(edge, ABOVE, RIGHT, xb, yb);
cf= NULL;
break;
case ELI:
VERTEX(edge, BELOW, LEFT, xb, yb);
edge->outp[ABOVE]= NULL;
cf= edge;
break;
case EMX:
if (xb != cf->xb)
VERTEX(edge, BELOW, RIGHT, xb, yb);
edge->outp[ABOVE]= NULL;
cf= NULL;
break;
case IMN:
if (cft == LED)
{
if (cf->bot.y != yb)
VERTEX(cf, BELOW, LEFT, cf->xb, yb);
new_tristrip(&tlist, cf, cf->xb, yb);
}
edge->outp[ABOVE]= cf->outp[ABOVE];
VERTEX(edge, ABOVE, RIGHT, xb, yb);
break;
case ILI:
new_tristrip(&tlist, edge, xb, yb);
cf= edge;
cft= ILI;
break;
case IRI:
if (cft == LED)
{
if (cf->bot.y != yb)
VERTEX(cf, BELOW, LEFT, cf->xb, yb);
new_tristrip(&tlist, cf, cf->xb, yb);
}
VERTEX(edge, BELOW, RIGHT, xb, yb);
edge->outp[ABOVE]= NULL;
break;
case IMX:
VERTEX(edge, BELOW, LEFT, xb, yb);
edge->outp[ABOVE]= NULL;
cft= IMX;
break;
case IMM:
VERTEX(edge, BELOW, LEFT, xb, yb);
edge->outp[ABOVE]= cf->outp[ABOVE];
if (xb != cf->xb)
VERTEX(cf, ABOVE, RIGHT, xb, yb);
cf= edge;
break;
case EMM:
VERTEX(edge, BELOW, RIGHT, xb, yb);
edge->outp[ABOVE]= NULL;
new_tristrip(&tlist, edge, xb, yb);
cf= edge;
break;
case LED:
if (edge->bot.y == yb)
VERTEX(edge, BELOW, LEFT, xb, yb);
edge->outp[ABOVE]= edge->outp[BELOW];
cf= edge;
cft= LED;
break;
case RED:
edge->outp[ABOVE]= cf->outp[ABOVE];
if (cft == LED)
{
if (cf->bot.y == yb)
{
VERTEX(edge, BELOW, RIGHT, xb, yb);
}
else
{
if (edge->bot.y == yb)
{
VERTEX(cf, BELOW, LEFT, cf->xb, yb);
VERTEX(edge, BELOW, RIGHT, xb, yb);
}
}
}
else
{
VERTEX(edge, BELOW, RIGHT, xb, yb);
VERTEX(edge, ABOVE, RIGHT, xb, yb);
}
cf= NULL;
break;
default:
break;
} /* End of switch */
} /* End of contributing conditional */
} /* End of edge exists conditional */
} /* End of AET loop */
/* Delete terminating edges from the AET, otherwise compute xt */
for (edge= aet; edge; edge= edge->next)
{
if (edge->top.y == yb)
{
prev_edge= edge->prev;
next_edge= edge->next;
if (prev_edge)
prev_edge->next= next_edge;
else
aet= next_edge;
if (next_edge)
next_edge->prev= prev_edge;
/* Copy bundle head state to the adjacent tail edge if required */
if ((edge->bstate[BELOW] == BUNDLE_HEAD) && prev_edge)
{
if (prev_edge->bstate[BELOW] == BUNDLE_TAIL)
{
prev_edge->outp[BELOW]= edge->outp[BELOW];
prev_edge->bstate[BELOW]= UNBUNDLED;
if (prev_edge->prev)
if (prev_edge->prev->bstate[BELOW] == BUNDLE_TAIL)
prev_edge->bstate[BELOW]= BUNDLE_HEAD;
}
}
}
else
{
if (edge->top.y == yt)
edge->xt= edge->top.x;
else
edge->xt= edge->bot.x + edge->dx * (yt - edge->bot.y);
}
}
if (scanbeam < sbt_entries)
{
/* === SCANBEAM INTERIOR PROCESSING ============================== */
build_intersection_table(&it, aet, dy);
/* Process each node in the intersection table */
for (intersect= it; intersect; intersect= intersect->next)
{
e0= intersect->ie[0];
e1= intersect->ie[1];
/* Only generate output for contributing intersections */
if ((e0->bundle[ABOVE][CLIP] || e0->bundle[ABOVE][SUBJ])
&& (e1->bundle[ABOVE][CLIP] || e1->bundle[ABOVE][SUBJ]))
{
p= e0->outp[ABOVE];
q= e1->outp[ABOVE];
ix= intersect->point.x;
iy= intersect->point.y + yb;
in[CLIP]= ( e0->bundle[ABOVE][CLIP] && !e0->bside[CLIP])
|| ( e1->bundle[ABOVE][CLIP] && e1->bside[CLIP])
|| (!e0->bundle[ABOVE][CLIP] && !e1->bundle[ABOVE][CLIP]
&& e0->bside[CLIP] && e1->bside[CLIP]);
in[SUBJ]= ( e0->bundle[ABOVE][SUBJ] && !e0->bside[SUBJ])
|| ( e1->bundle[ABOVE][SUBJ] && e1->bside[SUBJ])
|| (!e0->bundle[ABOVE][SUBJ] && !e1->bundle[ABOVE][SUBJ]
&& e0->bside[SUBJ] && e1->bside[SUBJ]);
/* Determine quadrant occupancies */
switch (op)
{
case GPC_DIFF:
case GPC_INT:
tr= (in[CLIP])
&& (in[SUBJ]);
tl= (in[CLIP] ^ e1->bundle[ABOVE][CLIP])
&& (in[SUBJ] ^ e1->bundle[ABOVE][SUBJ]);
br= (in[CLIP] ^ e0->bundle[ABOVE][CLIP])
&& (in[SUBJ] ^ e0->bundle[ABOVE][SUBJ]);
bl= (in[CLIP] ^ e1->bundle[ABOVE][CLIP] ^ e0->bundle[ABOVE][CLIP])
&& (in[SUBJ] ^ e1->bundle[ABOVE][SUBJ] ^ e0->bundle[ABOVE][SUBJ]);
break;
case GPC_XOR:
tr= (in[CLIP])
^ (in[SUBJ]);
tl= (in[CLIP] ^ e1->bundle[ABOVE][CLIP])
^ (in[SUBJ] ^ e1->bundle[ABOVE][SUBJ]);
br= (in[CLIP] ^ e0->bundle[ABOVE][CLIP])
^ (in[SUBJ] ^ e0->bundle[ABOVE][SUBJ]);
bl= (in[CLIP] ^ e1->bundle[ABOVE][CLIP] ^ e0->bundle[ABOVE][CLIP])
^ (in[SUBJ] ^ e1->bundle[ABOVE][SUBJ] ^ e0->bundle[ABOVE][SUBJ]);
break;
case GPC_UNION:
tr= (in[CLIP])
|| (in[SUBJ]);
tl= (in[CLIP] ^ e1->bundle[ABOVE][CLIP])
|| (in[SUBJ] ^ e1->bundle[ABOVE][SUBJ]);
br= (in[CLIP] ^ e0->bundle[ABOVE][CLIP])
|| (in[SUBJ] ^ e0->bundle[ABOVE][SUBJ]);
bl= (in[CLIP] ^ e1->bundle[ABOVE][CLIP] ^ e0->bundle[ABOVE][CLIP])
|| (in[SUBJ] ^ e1->bundle[ABOVE][SUBJ] ^ e0->bundle[ABOVE][SUBJ]);
break;
}
vclass= tr + (tl << 1) + (br << 2) + (bl << 3);
switch (vclass)
{
case EMN:
new_tristrip(&tlist, e1, ix, iy);
e0->outp[ABOVE]= e1->outp[ABOVE];
break;
case ERI:
if (p)
{
P_EDGE(prev_edge, e0, ABOVE, px, iy);
VERTEX(prev_edge, ABOVE, LEFT, px, iy);
VERTEX(e0, ABOVE, RIGHT, ix, iy);
e1->outp[ABOVE]= e0->outp[ABOVE];
e0->outp[ABOVE]= NULL;
}
break;
case ELI:
if (q)
{
N_EDGE(next_edge, e1, ABOVE, nx, iy);
VERTEX(e1, ABOVE, LEFT, ix, iy);
VERTEX(next_edge, ABOVE, RIGHT, nx, iy);
e0->outp[ABOVE]= e1->outp[ABOVE];
e1->outp[ABOVE]= NULL;
}
break;
case EMX:
if (p && q)
{
VERTEX(e0, ABOVE, LEFT, ix, iy);
e0->outp[ABOVE]= NULL;
e1->outp[ABOVE]= NULL;
}
break;
case IMN:
P_EDGE(prev_edge, e0, ABOVE, px, iy);
VERTEX(prev_edge, ABOVE, LEFT, px, iy);
N_EDGE(next_edge, e1, ABOVE, nx, iy);
VERTEX(next_edge, ABOVE, RIGHT, nx, iy);
new_tristrip(&tlist, prev_edge, px, iy);
e1->outp[ABOVE]= prev_edge->outp[ABOVE];
VERTEX(e1, ABOVE, RIGHT, ix, iy);
new_tristrip(&tlist, e0, ix, iy);
next_edge->outp[ABOVE]= e0->outp[ABOVE];
VERTEX(next_edge, ABOVE, RIGHT, nx, iy);
break;
case ILI:
if (p)
{
VERTEX(e0, ABOVE, LEFT, ix, iy);
N_EDGE(next_edge, e1, ABOVE, nx, iy);
VERTEX(next_edge, ABOVE, RIGHT, nx, iy);
e1->outp[ABOVE]= e0->outp[ABOVE];
e0->outp[ABOVE]= NULL;
}
break;
case IRI:
if (q)
{
VERTEX(e1, ABOVE, RIGHT, ix, iy);
P_EDGE(prev_edge, e0, ABOVE, px, iy);
VERTEX(prev_edge, ABOVE, LEFT, px, iy);
e0->outp[ABOVE]= e1->outp[ABOVE];
e1->outp[ABOVE]= NULL;
}
break;
case IMX:
if (p && q)
{
VERTEX(e0, ABOVE, RIGHT, ix, iy);
VERTEX(e1, ABOVE, LEFT, ix, iy);
e0->outp[ABOVE]= NULL;
e1->outp[ABOVE]= NULL;
P_EDGE(prev_edge, e0, ABOVE, px, iy);
VERTEX(prev_edge, ABOVE, LEFT, px, iy);
new_tristrip(&tlist, prev_edge, px, iy);
N_EDGE(next_edge, e1, ABOVE, nx, iy);
VERTEX(next_edge, ABOVE, RIGHT, nx, iy);
next_edge->outp[ABOVE]= prev_edge->outp[ABOVE];
VERTEX(next_edge, ABOVE, RIGHT, nx, iy);
}
break;
case IMM:
if (p && q)
{
VERTEX(e0, ABOVE, RIGHT, ix, iy);
VERTEX(e1, ABOVE, LEFT, ix, iy);
P_EDGE(prev_edge, e0, ABOVE, px, iy);
VERTEX(prev_edge, ABOVE, LEFT, px, iy);
new_tristrip(&tlist, prev_edge, px, iy);
N_EDGE(next_edge, e1, ABOVE, nx, iy);
VERTEX(next_edge, ABOVE, RIGHT, nx, iy);
e1->outp[ABOVE]= prev_edge->outp[ABOVE];
VERTEX(e1, ABOVE, RIGHT, ix, iy);
new_tristrip(&tlist, e0, ix, iy);
next_edge->outp[ABOVE]= e0->outp[ABOVE];
VERTEX(next_edge, ABOVE, RIGHT, nx, iy);
}
break;
case EMM:
if (p && q)
{
VERTEX(e0, ABOVE, LEFT, ix, iy);
new_tristrip(&tlist, e1, ix, iy);
e0->outp[ABOVE]= e1->outp[ABOVE];
}
break;
default:
break;
} /* End of switch */
} /* End of contributing intersection conditional */
/* Swap bundle sides in response to edge crossing */
if (e0->bundle[ABOVE][CLIP])
e1->bside[CLIP]= !e1->bside[CLIP];
if (e1->bundle[ABOVE][CLIP])
e0->bside[CLIP]= !e0->bside[CLIP];
if (e0->bundle[ABOVE][SUBJ])
e1->bside[SUBJ]= !e1->bside[SUBJ];
if (e1->bundle[ABOVE][SUBJ])
e0->bside[SUBJ]= !e0->bside[SUBJ];
/* Swap e0 and e1 bundles in the AET */
prev_edge= e0->prev;
next_edge= e1->next;
if (e1->next)
e1->next->prev= e0;
if (e0->bstate[ABOVE] == BUNDLE_HEAD)
{
search= TRUE;
while (search)
{
prev_edge= prev_edge->prev;
if (prev_edge)
{
if (prev_edge->bundle[ABOVE][CLIP]
|| prev_edge->bundle[ABOVE][SUBJ]
|| (prev_edge->bstate[ABOVE] == BUNDLE_HEAD))
search= FALSE;
}
else
search= FALSE;
}
}
if (!prev_edge)
{
e1->next= aet;
aet= e0->next;
}
else
{
e1->next= prev_edge->next;
prev_edge->next= e0->next;
}
e0->next->prev= prev_edge;
e1->next->prev= e1;
e0->next= next_edge;
} /* End of IT loop */
/* Prepare for next scanbeam */
for (edge= aet; edge; edge= next_edge)
{
next_edge= edge->next;
succ_edge= edge->succ;
if ((edge->top.y == yt) && succ_edge)
{
/* Replace AET edge by its successor */
succ_edge->outp[BELOW]= edge->outp[ABOVE];
succ_edge->bstate[BELOW]= edge->bstate[ABOVE];
succ_edge->bundle[BELOW][CLIP]= edge->bundle[ABOVE][CLIP];
succ_edge->bundle[BELOW][SUBJ]= edge->bundle[ABOVE][SUBJ];
prev_edge= edge->prev;
if (prev_edge)
prev_edge->next= succ_edge;
else
aet= succ_edge;
if (next_edge)
next_edge->prev= succ_edge;
succ_edge->prev= prev_edge;
succ_edge->next= next_edge;
}
else
{
/* Update this edge */
edge->outp[BELOW]= edge->outp[ABOVE];
edge->bstate[BELOW]= edge->bstate[ABOVE];
edge->bundle[BELOW][CLIP]= edge->bundle[ABOVE][CLIP];
edge->bundle[BELOW][SUBJ]= edge->bundle[ABOVE][SUBJ];
edge->xb= edge->xt;
}
edge->outp[ABOVE]= NULL;
}
}
} /* === END OF SCANBEAM PROCESSING ================================== */
/* Generate result tristrip from tlist */
result->strip= NULL;
result->num_strips= count_tristrips(tlist);
if (result->num_strips > 0)
{
MALLOC(result->strip, result->num_strips * sizeof(gpc_vertex_list),
"tristrip list creation", gpc_vertex_list);
s= 0;
for (tn= tlist; tn; tn= tnn)
{
tnn= tn->next;
if (tn->active > 2)
{
/* Valid tristrip: copy the vertices and free the heap */
result->strip[s].num_vertices= tn->active;
MALLOC(result->strip[s].vertex, tn->active * sizeof(gpc_vertex),
"tristrip creation", gpc_vertex);
v= 0;
if (INVERT_TRISTRIPS)
{
lt= tn->v[RIGHT];
rt= tn->v[LEFT];
}
else
{
lt= tn->v[LEFT];
rt= tn->v[RIGHT];
}
while (lt || rt)
{
if (lt)
{
ltn= lt->next;
result->strip[s].vertex[v].x= lt->x;
result->strip[s].vertex[v].y= lt->y;
v++;
FREE(lt);
lt= ltn;
}
if (rt)
{
rtn= rt->next;
result->strip[s].vertex[v].x= rt->x;
result->strip[s].vertex[v].y= rt->y;
v++;
FREE(rt);
rt= rtn;
}
}
s++;
}
else
{
/* Invalid tristrip: just free the heap */
for (lt= tn->v[LEFT]; lt; lt= ltn)
{
ltn= lt->next;
FREE(lt);
}
for (rt= tn->v[RIGHT]; rt; rt=rtn)
{
rtn= rt->next;
FREE(rt);
}
}
FREE(tn);
}
}
/* Tidy up */
reset_it(&it);
reset_lmt(&lmt);
FREE(c_heap);
FREE(s_heap);
FREE(sbt);
}
/*
===========================================================================
End of file: gpc.c
===========================================================================
*/
| 67,922 | 25.688802 | 91 | cpp |
octomap | octomap-master/octovis/src/extern/QGLViewer/VRender/gpc.h | /*
This file is part of the VRender library.
Copyright (C) 2005 Cyril Soler ([email protected])
Version 1.0.0, released on June 27, 2005.
http://artis.imag.fr/Members/Cyril.Soler/VRender
VRender is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
VRender is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VRender; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/****************************************************************************
Copyright (C) 2002-2014 Gilles Debunne. All rights reserved.
This file is part of the QGLViewer library version 2.6.3.
http://www.libqglviewer.com - [email protected]
This file may be used under the terms of the GNU General Public License
versions 2.0 or 3.0 as published by the Free Software Foundation and
appearing in the LICENSE file included in the packaging of this file.
In addition, as a special exception, Gilles Debunne gives you certain
additional rights, described in the file GPL_EXCEPTION in this package.
libQGLViewer uses dual licensing. Commercial/proprietary software must
purchase a libQGLViewer Commercial License.
This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*****************************************************************************/
/*
===========================================================================
Project: Generic Polygon Clipper
A new algorithm for calculating the difference, intersection,
exclusive-or or union of arbitrary polygon sets.
File: gpc.h
Author: Alan Murta (email: [email protected])
Version: 2.32
Date: 17th December 2004
Copyright: (C) 1997-2004, Advanced Interfaces Group,
University of Manchester.
This software is free for non-commercial use. It may be copied,
modified, and redistributed provided that this copyright notice
is preserved on all copies. The intellectual property rights of
the algorithms used reside with the University of Manchester
Advanced Interfaces Group.
You may not use this software, in whole or in part, in support
of any commercial product without the express consent of the
author.
There is no warranty or other guarantee of fitness of this
software for any purpose. It is provided solely "as is".
===========================================================================
*/
#ifndef __gpc_h
#define __gpc_h
#include <stdio.h>
/*
===========================================================================
Constants
===========================================================================
*/
/* Increase GPC_EPSILON to encourage merging of near coincident edges */
//#define GPC_EPSILON (DBL_EPSILON)
#define GPC_EPSILON 1e-7
#define GPC_VERSION "2.32"
/*
===========================================================================
Public Data Types
===========================================================================
*/
typedef enum /* Set operation type */
{
GPC_DIFF, /* Difference */
GPC_INT, /* Intersection */
GPC_XOR, /* Exclusive or */
GPC_UNION /* Union */
} gpc_op;
typedef struct /* Polygon vertex structure */
{
double x; /* Vertex x component */
double y; /* vertex y component */
} gpc_vertex;
typedef struct /* Vertex list structure */
{
long num_vertices; /* Number of vertices in list */
gpc_vertex *vertex; /* Vertex array pointer */
} gpc_vertex_list;
typedef struct /* Polygon set structure */
{
unsigned long num_contours; /* Number of contours in polygon */
int *hole; /* Hole / external contour flags */
gpc_vertex_list *contour; /* Contour array pointer */
} gpc_polygon;
typedef struct /* Tristrip set structure */
{
unsigned long num_strips; /* Number of tristrips */
gpc_vertex_list *strip; /* Tristrip array pointer */
} gpc_tristrip;
/*
===========================================================================
Public Function Prototypes
===========================================================================
*/
void gpc_read_polygon (FILE *infile_ptr,
int read_hole_flags,
gpc_polygon *polygon);
void gpc_write_polygon (FILE *outfile_ptr,
int write_hole_flags,
gpc_polygon *polygon);
void gpc_add_contour (gpc_polygon *polygon,
gpc_vertex_list *contour,
int hole);
void gpc_polygon_clip (gpc_op set_operation,
gpc_polygon *subject_polygon,
gpc_polygon *clip_polygon,
gpc_polygon *result_polygon);
void gpc_tristrip_clip (gpc_op set_operation,
gpc_polygon *subject_polygon,
gpc_polygon *clip_polygon,
gpc_tristrip *result_tristrip);
void gpc_polygon_to_tristrip (gpc_polygon *polygon,
gpc_tristrip *tristrip);
void gpc_free_polygon (gpc_polygon *polygon);
void gpc_free_tristrip (gpc_tristrip *tristrip);
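/* Minimal usage sketch of the API above (illustrative only; subj_file and
clip_file are placeholder FILE* handles): read two polygons, clip them,
then free everything when done.
gpc_polygon subj, clip, result;
gpc_read_polygon(subj_file, 0, &subj);
gpc_read_polygon(clip_file, 0, &clip);
gpc_polygon_clip(GPC_INT, &subj, &clip, &result);
gpc_free_polygon(&subj); gpc_free_polygon(&clip); gpc_free_polygon(&result);
*/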
#endif
/*
===========================================================================
End of file: gpc.h
===========================================================================
*/
| 6,364 | 34.361111 | 78 | h |
octomap | octomap-master/scripts/increase_version.py | #!/usr/bin/env python
# Increases the version number of package.xml and CMakeLists.txt files in
# subfolders. The first argument specifies the version increase:
# major, minor, or patch (default, e.g. 1.6.2 --> 1.6.3)
#
# Borrows heavily from ROS / catkin release tools
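# Usage: ./increase_version.py [major|minor|patch]   (default: patch)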
import re
import sys
import copy
manifest_match = "<version>(\d+)\.(\d+)\.(\d+)</version>"
if __name__ == '__main__':
bump = "patch"
if len(sys.argv) > 1:
bump = sys.argv[1]
if bump not in {"major","minor","patch"}:
print sys.argv[0]+" [major|minor|patch] (default: patch)"
exit(-1)
manifests=["octomap/package.xml","octovis/package.xml","dynamicEDT3D/package.xml"]
cmakelists=["octomap/CMakeLists.txt","octovis/CMakeLists.txt","dynamicEDT3D/CMakeLists.txt"]
versions = []
# find versions in package.xml
for manifest in manifests:
with open(manifest, 'r') as f:
package_str = f.read()
match = re.search(manifest_match, package_str)
if match is None:
print "Error: no version tag found in %s" % manifest
exit(-1)
else:
v= match.groups()
v = [int(x) for x in v]
versions.append(v)
# find version in CMakeLists:
for cmake in cmakelists:
with open(cmake, 'r') as f:
cmake_str = f.read()
v = []
for m in ["MAJOR","MINOR","PATCH"]:
searchstr = "_%s_VERSION (\d+)\)" % m
match = re.search(searchstr, cmake_str)
if match is None:
print "Error: no version tag %s found in %s" % (searchstr,cmake)
exit(-1)
v.append(int(match.group(1)))
versions.append(v)
new_version = copy.deepcopy(versions[0])
for v in versions:
if v != versions[0]:
print "Error: check current versions, mismatch: %d.%d.%d vs. %d.%d.%d" %(tuple(v)+tuple(versions[0]))
exit(-1)
print "OctoMap component versions found: %d.%d.%d" % tuple(versions[0])
# "bump version" from catkin:
# find the desired index
idx = dict(major=0, minor=1, patch=2)[bump]
# increment the desired part
new_version[idx] += 1
# reset all parts behind the bumped part
new_version = new_version[:idx + 1] + [0 for x in new_version[idx + 1:]]
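# e.g. a "minor" bump turns [1, 6, 2] into [1, 7, 0]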
new_version_str = "%d.%d.%d" % tuple(new_version)
print 'Updating to new version: %s\n' % new_version_str
# adjust CMakeLists
for cmake in cmakelists:
with open(cmake, 'r') as f:
cmake_str = f.read()
idx = dict(MAJOR=0, MINOR=1, PATCH=2)
for m in ["MAJOR","MINOR","PATCH"]:
old_str = "_%s_VERSION %d)" % (m,versions[0][idx[m]])
new_str = "_%s_VERSION %d)" % (m,new_version[idx[m]])
cmake_str = cmake_str.replace(old_str, new_str)
with open(cmake, 'w') as f:
f.write(cmake_str)
# adjust package.xml
for manifest in manifests:
with open(manifest, 'r') as f:
package_str = f.read()
old_str = "<version>%d.%d.%d</version>" % tuple(versions[0])
new_str = "<version>%s</version>" % new_version_str
new_package_str = package_str.replace(old_str, new_str)
with open(manifest, 'w') as f:
f.write(new_package_str)
print "Finished writing package.xml and CMakeLists.txt files.\n"
print "Now check the output, adjust CHANGELOG, and \"git commit\".\nFinally, run:"
print " git checkout master && git merge --no-ff devel && git tag v%s" % new_version_str
print " git push origin master devel && git push --tags"
print "\n(adjust when not on the \"devel\" branch)\n"
| 3,451 | 29.548673 | 107 | py |
octomap | octomap-master/scripts/travis_build_jobs.sh | #!/bin/bash
# travis build script for test compilations
set -e
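# Configure and build the component in directory $1, staging its install
# prefix under /tmp/octomap/$1 (installation happens later via "make install").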
function build {
cd $1
mkdir build
cd build
cmake .. -DCMAKE_INSTALL_PREFIX=/tmp/octomap/$1
make -j4
cd ..
}
case "$1" in
"dist")
build .
cd build && make test
make install
;;
"components")
build octomap
cd build && make test
make install
cd ../..
build dynamicEDT3D
cd ..
build octovis
cd ..
;;
*)
echo "Invalid build variant"
exit 1
esac
| 453 | 10.35 | 49 | sh |
mmsegmentation | mmsegmentation-master/.owners.yml | assign:
strategy:
# random
# round-robin
daily-shift-based
assignees:
- csatsurnh
- xiexinch
- MeowZheng
- csatsurnh
- xiexinch
| 164 | 12.75 | 21 | yml |
mmsegmentation | mmsegmentation-master/.pre-commit-config.yaml | repos:
- repo: https://github.com/PYCQA/flake8.git
rev: 5.0.4
hooks:
- id: flake8
- repo: https://github.com/zhouzaida/isort
rev: 5.12.1
hooks:
- id: isort
- repo: https://github.com/pre-commit/mirrors-yapf
rev: v0.32.0
hooks:
- id: yapf
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
- id: trailing-whitespace
- id: check-yaml
- id: end-of-file-fixer
- id: requirements-txt-fixer
- id: double-quote-string-fixer
- id: check-merge-conflict
- id: fix-encoding-pragma
args: ["--remove"]
- id: mixed-line-ending
args: ["--fix=lf"]
- repo: https://github.com/executablebooks/mdformat
rev: 0.7.9
hooks:
- id: mdformat
args: ["--number"]
additional_dependencies:
- mdformat-openmmlab
- mdformat_frontmatter
- linkify-it-py
- repo: https://github.com/codespell-project/codespell
rev: v2.2.1
hooks:
- id: codespell
- repo: https://github.com/myint/docformatter
rev: v1.3.1
hooks:
- id: docformatter
args: ["--in-place", "--wrap-descriptions", "79"]
# - repo: local
# hooks:
# - id: update-model-index
# name: update-model-index
# description: Collect model information and update model-index.yml
# entry: .dev/md2yml.py
# additional_dependencies: [mmcv, lxml, opencv-python]
# language: python
# files: ^configs/.*\.md$
# require_serial: true
- repo: https://github.com/open-mmlab/pre-commit-hooks
rev: v0.2.0 # Use the rev to fix revision
hooks:
- id: check-algo-readme
- id: check-copyright
args: ["mmseg", "tools", "tests", "demo"] # the dir_to_check with expected directory to check
| 1,830 | 29.016393 | 102 | yaml |
mmsegmentation | mmsegmentation-master/.readthedocs.yml | version: 2
formats: all
python:
version: 3.7
install:
- requirements: requirements/docs.txt
- requirements: requirements/readthedocs.txt
| 151 | 14.2 | 48 | yml |
mmsegmentation | mmsegmentation-master/LICENSES.md | # Licenses for special features
In this file, we list the features that are licensed under terms other than Apache 2.0. Users should be careful when adopting these features in any commercial matters.
| Feature | Files | License |
| :-------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: |
| SegFormer | [mmseg/models/decode_heads/segformer_head.py](https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/decode_heads/segformer_head.py) | [NVIDIA License](https://github.com/NVlabs/SegFormer#license) |
| DEST | [mmseg/models/backbones/smit.py](https://github.com/open-mmlab/mmsegmentation/blob/master/projects/dest/models/smit.py) [mmseg/models/decode_heads/dest_head.py](https://github.com/open-mmlab/mmsegmentation/blob/master/projects/dest/models/dest_head.py) | [NVIDIA License](https://github.com/NVIDIA/DL4AGX/blob/master/DEST/LICENSE) |
| 1,581 | 174.777778 | 346 | md |
mmsegmentation | mmsegmentation-master/README.md | <div align="center">
<img src="resources/mmseg-logo.png" width="600"/>
<div> </div>
<div align="center">
<b><font size="5">OpenMMLab website</font></b>
<sup>
<a href="https://openmmlab.com">
<i><font size="4">HOT</font></i>
</a>
</sup>
<b><font size="5">OpenMMLab platform</font></b>
<sup>
<a href="https://platform.openmmlab.com">
<i><font size="4">TRY IT OUT</font></i>
</a>
</sup>
</div>
<div> </div>
<br />
[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/mmsegmentation)](https://pypi.org/project/mmsegmentation/)
[![PyPI](https://img.shields.io/pypi/v/mmsegmentation)](https://pypi.org/project/mmsegmentation)
[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmsegmentation.readthedocs.io/en/latest/)
[![badge](https://github.com/open-mmlab/mmsegmentation/workflows/build/badge.svg)](https://github.com/open-mmlab/mmsegmentation/actions)
[![codecov](https://codecov.io/gh/open-mmlab/mmsegmentation/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmsegmentation)
[![license](https://img.shields.io/github/license/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/blob/master/LICENSE)
[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/issues)
[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/issues)
[📘Documentation](https://mmsegmentation.readthedocs.io/en/latest/) |
[🛠️Installation](https://mmsegmentation.readthedocs.io/en/latest/get_started.html) |
[👀Model Zoo](https://mmsegmentation.readthedocs.io/en/latest/model_zoo.html) |
[🆕Update News](https://mmsegmentation.readthedocs.io/en/latest/changelog.html) |
[🤔Reporting Issues](https://github.com/open-mmlab/mmsegmentation/issues/new/choose)
</div>
<div align="center">
English | [简体中文](README_zh-CN.md)
</div>
<div align="center">
<a href="https://openmmlab.medium.com/" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/25839884/218352562-cdded397-b0f3-4ca1-b8dd-a60df8dca75b.png" width="3%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
<a href="https://discord.gg/raweFPmdzG" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/25839884/218347213-c080267f-cbb6-443e-8532-8e1ed9a58ea9.png" width="3%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
<a href="https://twitter.com/OpenMMLab" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/25839884/218346637-d30c8a0f-3eba-4699-8131-512fb06d46db.png" width="3%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
<a href="https://www.youtube.com/openmmlab" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/25839884/218346691-ceb2116a-465a-40af-8424-9f30d2348ca9.png" width="3%" alt="" /></a>
</div>
## Introduction
MMSegmentation is an open source semantic segmentation library based on PyTorch.
It is a part of the [OpenMMLab](https://openmmlab.com/) project.
The master branch works with **PyTorch 1.5+**.
![demo image](resources/seg_demo.gif)
<details open>
<summary>Major features</summary>
- **Unified Benchmark**
We provide a unified benchmark toolbox for various semantic segmentation methods.
- **Modular Design**
We decompose the semantic segmentation framework into different components and one can easily construct a customized semantic segmentation framework by combining different modules.
- **Support of multiple methods out of box**
The toolbox directly supports popular and contemporary semantic segmentation frameworks, *e.g.* PSPNet, DeepLabV3, PSANet, DeepLabV3+, etc.
- **High efficiency**
The training speed is faster than or comparable to other codebases.
</details>
## What's New
### 💎 Stable version
v0.30.0 was released on 01/11/2023:
- Add 'Projects/' folder, and the first example project
- Support Delving into High-Quality Synthetic Face Occlusion Segmentation Datasets
Please refer to [changelog.md](docs/en/changelog.md) for details and release history.
### 🌟 Preview of 1.x version
A brand new version of **MMSegmentation v1.0.0rc3** was released on 12/31/2022:
- Unifies interfaces of all components based on [MMEngine](https://github.com/open-mmlab/mmengine).
- Faster training and testing speed with complete support of mixed precision training.
- Refactored and more flexible [architecture](https://mmsegmentation.readthedocs.io/en/1.x/overview.html).
Find more new features in [1.x branch](https://github.com/open-mmlab/mmsegmentation/tree/1.x). Issues and PRs are welcome!
## Installation
Please refer to [get_started.md](docs/en/get_started.md#installation) for installation and [dataset_prepare.md](docs/en/dataset_prepare.md#prepare-datasets) for dataset preparation.
## Get Started
Please see [train.md](docs/en/train.md) and [inference.md](docs/en/inference.md) for the basic usage of MMSegmentation.
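For a quick feel of the Python inference API, here is a minimal sketch (the checkpoint path below is a placeholder; download a matching checkpoint from the model zoo first):
```python
from mmseg.apis import inference_segmentor, init_segmentor
# A config shipped with this repo, plus a matching checkpoint from the
# model zoo (the checkpoint path is a placeholder; adjust it to your download).
config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
checkpoint_file = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes.pth'
# Build the segmentor from the config and load the weights.
model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
# Run inference on a single image and save a color-coded visualization.
result = inference_segmentor(model, 'demo/demo.png')
model.show_result('demo/demo.png', result, out_file='result.png', opacity=0.5)
```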
There are also tutorials for:
- [customizing dataset](docs/en/tutorials/customize_datasets.md)
- [designing data pipeline](docs/en/tutorials/data_pipeline.md)
- [customizing modules](docs/en/tutorials/customize_models.md)
- [customizing runtime](docs/en/tutorials/customize_runtime.md)
- [training tricks](docs/en/tutorials/training_tricks.md)
- [useful tools](docs/en/useful_tools.md)
A Colab tutorial is also provided. You may preview the notebook [here](demo/MMSegmentation_Tutorial.ipynb) or directly [run](https://colab.research.google.com/github/open-mmlab/mmsegmentation/blob/master/demo/MMSegmentation_Tutorial.ipynb) it on Colab.
## Benchmark and model zoo
Results and models are available in the [model zoo](docs/en/model_zoo.md).
Supported backbones:
- [x] ResNet (CVPR'2016)
- [x] ResNeXt (CVPR'2017)
- [x] [HRNet (CVPR'2019)](configs/hrnet)
- [x] [ResNeSt (ArXiv'2020)](configs/resnest)
- [x] [MobileNetV2 (CVPR'2018)](configs/mobilenet_v2)
- [x] [MobileNetV3 (ICCV'2019)](configs/mobilenet_v3)
- [x] [Vision Transformer (ICLR'2021)](configs/vit)
- [x] [Swin Transformer (ICCV'2021)](configs/swin)
- [x] [Twins (NeurIPS'2021)](configs/twins)
- [x] [BEiT (ICLR'2022)](configs/beit)
- [x] [ConvNeXt (CVPR'2022)](configs/convnext)
- [x] [MAE (CVPR'2022)](configs/mae)
- [x] [PoolFormer (CVPR'2022)](configs/poolformer)
- [x] [SegNeXt (NeurIPS'2022)](configs/segnext)
Supported methods:
- [x] [FCN (CVPR'2015/TPAMI'2017)](configs/fcn)
- [x] [ERFNet (T-ITS'2017)](configs/erfnet)
- [x] [UNet (MICCAI'2016/Nat. Methods'2019)](configs/unet)
- [x] [PSPNet (CVPR'2017)](configs/pspnet)
- [x] [DeepLabV3 (ArXiv'2017)](configs/deeplabv3)
- [x] [BiSeNetV1 (ECCV'2018)](configs/bisenetv1)
- [x] [PSANet (ECCV'2018)](configs/psanet)
- [x] [DeepLabV3+ (CVPR'2018)](configs/deeplabv3plus)
- [x] [UPerNet (ECCV'2018)](configs/upernet)
- [x] [ICNet (ECCV'2018)](configs/icnet)
- [x] [NonLocal Net (CVPR'2018)](configs/nonlocal_net)
- [x] [EncNet (CVPR'2018)](configs/encnet)
- [x] [Semantic FPN (CVPR'2019)](configs/sem_fpn)
- [x] [DANet (CVPR'2019)](configs/danet)
- [x] [APCNet (CVPR'2019)](configs/apcnet)
- [x] [EMANet (ICCV'2019)](configs/emanet)
- [x] [CCNet (ICCV'2019)](configs/ccnet)
- [x] [DMNet (ICCV'2019)](configs/dmnet)
- [x] [ANN (ICCV'2019)](configs/ann)
- [x] [GCNet (ICCVW'2019/TPAMI'2020)](configs/gcnet)
- [x] [FastFCN (ArXiv'2019)](configs/fastfcn)
- [x] [Fast-SCNN (ArXiv'2019)](configs/fastscnn)
- [x] [ISANet (ArXiv'2019/IJCV'2021)](configs/isanet)
- [x] [OCRNet (ECCV'2020)](configs/ocrnet)
- [x] [DNLNet (ECCV'2020)](configs/dnlnet)
- [x] [PointRend (CVPR'2020)](configs/point_rend)
- [x] [CGNet (TIP'2020)](configs/cgnet)
- [x] [BiSeNetV2 (IJCV'2021)](configs/bisenetv2)
- [x] [STDC (CVPR'2021)](configs/stdc)
- [x] [SETR (CVPR'2021)](configs/setr)
- [x] [DPT (ArXiv'2021)](configs/dpt)
- [x] [Segmenter (ICCV'2021)](configs/segmenter)
- [x] [SegFormer (NeurIPS'2021)](configs/segformer)
- [x] [K-Net (NeurIPS'2021)](configs/knet)
- [x] [DEST (CVPRW'2022)](projects/dest)
Supported datasets:
- [x] [Cityscapes](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#cityscapes)
- [x] [PASCAL VOC](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#pascal-voc)
- [x] [ADE20K](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#ade20k)
- [x] [Pascal Context](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#pascal-context)
- [x] [COCO-Stuff 10k](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#coco-stuff-10k)
- [x] [COCO-Stuff 164k](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#coco-stuff-164k)
- [x] [CHASE_DB1](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#chase-db1)
- [x] [DRIVE](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#drive)
- [x] [HRF](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#hrf)
- [x] [STARE](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#stare)
- [x] [Dark Zurich](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#dark-zurich)
- [x] [Nighttime Driving](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#nighttime-driving)
- [x] [LoveDA](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#loveda)
- [x] [Potsdam](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#isprs-potsdam)
- [x] [Vaihingen](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#isprs-vaihingen)
- [x] [iSAID](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#isaid)
- [x] [High quality synthetic face occlusion](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#delving-into-high-quality-synthetic-face-occlusion-segmentation-datasets)
- [x] [ImageNetS](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#imagenets)
## FAQ
Please refer to [FAQ](docs/en/faq.md) for frequently asked questions.
## Contributing
We appreciate all contributions to improve MMSegmentation. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guideline.
## Acknowledgement
MMSegmentation is an open source project that welcomes any contribution and feedback.
We hope that the toolbox and benchmark can serve the growing research
community by providing a flexible as well as standardized toolkit to reimplement existing methods
and develop new semantic segmentation methods.
## Citation
If you find this project useful in your research, please consider citing:
```bibtex
@misc{mmseg2020,
title={{MMSegmentation}: OpenMMLab Semantic Segmentation Toolbox and Benchmark},
author={MMSegmentation Contributors},
howpublished = {\url{https://github.com/open-mmlab/mmsegmentation}},
year={2020}
}
```
## License
MMSegmentation is released under the Apache 2.0 license, while some specific features in this library are under other licenses. Please refer to [LICENSES.md](LICENSES.md) for a careful check if you are using our code for commercial matters.
## Projects in OpenMMLab
- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages.
- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark.
- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
- [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO series toolbox and benchmark.
- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark.
- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox.
- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark.
- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark.
- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark.
- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark.
- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark.
- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark.
- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox.
- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox.
- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab Model Deployment Framework.
| 13,936 | 51.992395 | 249 | md |
mmsegmentation | mmsegmentation-master/README_zh-CN.md | <div align="center">
<img src="resources/mmseg-logo.png" width="600"/>
<div> </div>
<div align="center">
<b><font size="5">OpenMMLab website</font></b>
<sup>
<a href="https://openmmlab.com">
<i><font size="4">HOT</font></i>
</a>
</sup>
<b><font size="5">OpenMMLab platform</font></b>
<sup>
<a href="https://platform.openmmlab.com">
<i><font size="4">TRY IT OUT</font></i>
</a>
</sup>
</div>
<div> </div>
<br />
[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/mmsegmentation)](https://pypi.org/project/mmsegmentation/)
[![PyPI](https://img.shields.io/pypi/v/mmsegmentation)](https://pypi.org/project/mmsegmentation)
[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmsegmentation.readthedocs.io/zh_CN/latest/)
[![badge](https://github.com/open-mmlab/mmsegmentation/workflows/build/badge.svg)](https://github.com/open-mmlab/mmsegmentation/actions)
[![codecov](https://codecov.io/gh/open-mmlab/mmsegmentation/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmsegmentation)
[![license](https://img.shields.io/github/license/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/blob/master/LICENSE)
[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/issues)
[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/issues)
[📘Documentation](https://mmsegmentation.readthedocs.io/en/latest/) |
[🛠️Installation](https://mmsegmentation.readthedocs.io/en/latest/get_started.html) |
[👀Model Zoo](https://mmsegmentation.readthedocs.io/en/latest/model_zoo.html) |
[🆕Update News](https://mmsegmentation.readthedocs.io/en/latest/changelog.html) |
[🤔Reporting Issues](https://github.com/open-mmlab/mmsegmentation/issues/new/choose)
[English](README.md) | Simplified Chinese
</div>
## Introduction
MMSegmentation is an open source semantic segmentation toolbox based on PyTorch. It is a part of the OpenMMLab project.
The master branch works with PyTorch 1.5+.
![demo image](resources/seg_demo.gif)
<details open>
<summary>Major features</summary>
### Major features
- **Unified benchmark**
We integrate various semantic segmentation algorithms into a unified toolbox for benchmarking.
- **Modular design**
We decompose the segmentation framework into different module components, and users can easily construct a customized segmentation model by combining different modules.
- **Rich plug-and-play algorithms and models**
MMSegmentation supports many mainstream and up-to-date segmentation methods, e.g. PSPNet, DeepLabV3, PSANet, DeepLabV3+, etc.
- **High efficiency**
The training speed is faster than or comparable to other codebases.
</details>
## What's New
### 💎 Stable version
The latest version v0.30.0 was released on 2023.01.11:
- Add the projects directory and an example project
- Support the high-quality synthetic face dataset
Please read the [changelog](docs/en/changelog.md) for more details and release history.
### 🌟 Preview of 1.x version
A brand new version of **MMSegmentation v1.0.0rc3** was released on 2022.12.31:
- Unifies interfaces of all components based on [MMEngine](https://github.com/open-mmlab/mmengine).
- Full support of mixed precision, with faster training and testing speed.
- A more flexible [architecture](https://mmsegmentation.readthedocs.io/en/1.x/overview.html) after refactoring.
Discover more new features in the [1.x branch](https://github.com/open-mmlab/mmsegmentation/tree/1.x). Issues and PRs are welcome!
## Installation
Please refer to the [get started documentation](docs/zh_cn/get_started.md#installation) for installation and [dataset preparation](docs/zh_cn/dataset_prepare.md) for data processing.
## Get Started
Please see the [training tutorial](docs/zh_cn/train.md) and [inference tutorial](docs/zh_cn/inference.md) for the basic usage of MMSegmentation.
We also provide some advanced tutorials covering:
- [customizing datasets](docs/zh_cn/tutorials/customize_datasets.md)
- [designing new data pipelines](docs/zh_cn/tutorials/data_pipeline.md)
- [customizing models](docs/zh_cn/tutorials/customize_models.md)
- [customizing runtime settings](docs/zh_cn/tutorials/customize_runtime.md)
- [training tricks](docs/zh_cn/tutorials/training_tricks.md)
- [useful tools](docs/zh_cn/useful_tools.md)
A Colab tutorial is also provided. You may preview the notebook [here](demo/MMSegmentation_Tutorial.ipynb) or directly [run](https://colab.research.google.com/github/open-mmlab/mmsegmentation/blob/master/demo/MMSegmentation_Tutorial.ipynb) it on Colab.
## Benchmark and model zoo
Results and models are available in the [model zoo](docs/zh_cn/model_zoo.md).
Supported backbones:
- [x] ResNet (CVPR'2016)
- [x] ResNeXt (CVPR'2017)
- [x] [HRNet (CVPR'2019)](configs/hrnet)
- [x] [ResNeSt (ArXiv'2020)](configs/resnest)
- [x] [MobileNetV2 (CVPR'2018)](configs/mobilenet_v2)
- [x] [MobileNetV3 (ICCV'2019)](configs/mobilenet_v3)
- [x] [Vision Transformer (ICLR'2021)](configs/vit)
- [x] [Swin Transformer (ICCV'2021)](configs/swin)
- [x] [Twins (NeurIPS'2021)](configs/twins)
- [x] [BEiT (ICLR'2022)](configs/beit)
- [x] [ConvNeXt (CVPR'2022)](configs/convnext)
- [x] [MAE (CVPR'2022)](configs/mae)
- [x] [PoolFormer (CVPR'2022)](configs/poolformer)
- [x] [SegNeXt (NeurIPS'2022)](configs/segnext)
Supported methods:
- [x] [FCN (CVPR'2015/TPAMI'2017)](configs/fcn)
- [x] [ERFNet (T-ITS'2017)](configs/erfnet)
- [x] [UNet (MICCAI'2016/Nat. Methods'2019)](configs/unet)
- [x] [PSPNet (CVPR'2017)](configs/pspnet)
- [x] [DeepLabV3 (ArXiv'2017)](configs/deeplabv3)
- [x] [BiSeNetV1 (ECCV'2018)](configs/bisenetv1)
- [x] [PSANet (ECCV'2018)](configs/psanet)
- [x] [DeepLabV3+ (CVPR'2018)](configs/deeplabv3plus)
- [x] [UPerNet (ECCV'2018)](configs/upernet)
- [x] [ICNet (ECCV'2018)](configs/icnet)
- [x] [NonLocal Net (CVPR'2018)](configs/nonlocal_net)
- [x] [EncNet (CVPR'2018)](configs/encnet)
- [x] [Semantic FPN (CVPR'2019)](configs/sem_fpn)
- [x] [DANet (CVPR'2019)](configs/danet)
- [x] [APCNet (CVPR'2019)](configs/apcnet)
- [x] [EMANet (ICCV'2019)](configs/emanet)
- [x] [CCNet (ICCV'2019)](configs/ccnet)
- [x] [DMNet (ICCV'2019)](configs/dmnet)
- [x] [ANN (ICCV'2019)](configs/ann)
- [x] [GCNet (ICCVW'2019/TPAMI'2020)](configs/gcnet)
- [x] [FastFCN (ArXiv'2019)](configs/fastfcn)
- [x] [Fast-SCNN (ArXiv'2019)](configs/fastscnn)
- [x] [ISANet (ArXiv'2019/IJCV'2021)](configs/isanet)
- [x] [OCRNet (ECCV'2020)](configs/ocrnet)
- [x] [DNLNet (ECCV'2020)](configs/dnlnet)
- [x] [PointRend (CVPR'2020)](configs/point_rend)
- [x] [CGNet (TIP'2020)](configs/cgnet)
- [x] [BiSeNetV2 (IJCV'2021)](configs/bisenetv2)
- [x] [STDC (CVPR'2021)](configs/stdc)
- [x] [SETR (CVPR'2021)](configs/setr)
- [x] [DPT (ArXiv'2021)](configs/dpt)
- [x] [Segmenter (ICCV'2021)](configs/segmenter)
- [x] [SegFormer (NeurIPS'2021)](configs/segformer)
- [x] [K-Net (NeurIPS'2021)](configs/knet)
Supported datasets:
- [x] [Cityscapes](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#cityscapes)
- [x] [PASCAL VOC](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#pascal-voc)
- [x] [ADE20K](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#ade20k)
- [x] [Pascal Context](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#pascal-context)
- [x] [COCO-Stuff 10k](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#coco-stuff-10k)
- [x] [COCO-Stuff 164k](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#coco-stuff-164k)
- [x] [CHASE_DB1](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#chase-db1)
- [x] [DRIVE](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#drive)
- [x] [HRF](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#hrf)
- [x] [STARE](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#stare)
- [x] [Dark Zurich](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#dark-zurich)
- [x] [Nighttime Driving](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#nighttime-driving)
- [x] [LoveDA](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#loveda)
- [x] [Potsdam](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#isprs-potsdam)
- [x] [Vaihingen](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#isprs-vaihingen)
- [x] [iSAID](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#isaid)
- [x] [ImageNetS](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#imagenets)
## FAQ
If you run into problems, please refer to the [FAQ](docs/zh_cn/faq.md).
## Contributing
We appreciate all contributions to improve MMSegmentation. Please refer to the [contributing guideline](.github/CONTRIBUTING.md) for how to participate in the project.
## Acknowledgement
MMSegmentation is an open-source project jointly contributed by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as the users who give valuable feedback. We hope this toolbox and benchmark can serve the community with flexible code tools for reimplementing existing methods and developing new models, and thereby keep contributing to the open-source community.
## Citation
If you find this project helpful for your research, please cite MMSegmentation using the following BibTeX entry:
```bibtex
@misc{mmseg2020,
title={{MMSegmentation}: OpenMMLab Semantic Segmentation Toolbox and Benchmark},
author={MMSegmentation Contributors},
howpublished = {\url{https://github.com/open-mmlab/mmsegmentation}},
year={2020}
}
```
## License
`MMSegmentation` is currently released under the Apache 2.0 license, but some of its functionality uses other licenses. Please refer to [LICENSES.md](LICENSES.md) for a detailed list of these features and their corresponding licenses, and review it carefully if you are engaged in commercial activities.
## Projects in OpenMMLab
- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision
- [MIM](https://github.com/open-mmlab/mim): MIM is the unified entry point for OpenMMLab projects, algorithms, and models
- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox
- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab object detection toolbox
- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection
- [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO series toolbox and benchmark
- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark
- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox
- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab toolbox for full-pipeline text detection, recognition, and understanding
- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox
- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark
- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark
- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark
- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab few-shot learning toolbox and benchmark
- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation video understanding toolbox
- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab unified video object perception platform
- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow estimation toolbox and benchmark
- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox
- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox
- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework
## Welcome to the OpenMMLab Community
Scan the QR codes below to follow the OpenMMLab team's [official Zhihu account](https://www.zhihu.com/people/openmmlab) and to join the [OpenMMLab team](https://jq.qq.com/?_wv=1027&k=aCvMxdr3) and [MMSegmentation](https://jq.qq.com/?_wv=1027&k=9sprS2YO) QQ groups.
<div align="center">
<img src="docs/zh_cn/imgs/zhihu_qrcode.jpg" height="400" /> <img src="docs/zh_cn/imgs/qq_group_qrcode.jpg" height="400" />
</div>
In the OpenMMLab community, we will
- 📢 share the latest core technologies of AI frameworks
- 💻 explain the source code of commonly used PyTorch modules
- 📰 release the latest news about OpenMMLab
- 🚀 introduce cutting-edge algorithms developed by OpenMMLab
- 🏃 provide more efficient Q&A and feedback channels
- 🔥 offer a platform for in-depth communication with developers from all walks of life
Packed with useful content 📘 and waiting for you 💗, the OpenMMLab community looks forward to having you 👬
| 10,804 | 40.557692 | 200 | md |
mmsegmentation | mmsegmentation-master/model-index.yml | Import:
- configs/ann/ann.yml
- configs/apcnet/apcnet.yml
- configs/beit/beit.yml
- configs/bisenetv1/bisenetv1.yml
- configs/bisenetv2/bisenetv2.yml
- configs/ccnet/ccnet.yml
- configs/cgnet/cgnet.yml
- configs/convnext/convnext.yml
- configs/danet/danet.yml
- configs/deeplabv3/deeplabv3.yml
- configs/deeplabv3plus/deeplabv3plus.yml
- configs/dmnet/dmnet.yml
- configs/dnlnet/dnlnet.yml
- configs/dpt/dpt.yml
- configs/emanet/emanet.yml
- configs/encnet/encnet.yml
- configs/erfnet/erfnet.yml
- configs/fastfcn/fastfcn.yml
- configs/fastscnn/fastscnn.yml
- configs/fcn/fcn.yml
- configs/gcnet/gcnet.yml
- configs/hrnet/hrnet.yml
- configs/icnet/icnet.yml
- configs/isanet/isanet.yml
- configs/knet/knet.yml
- configs/mae/mae.yml
- configs/mobilenet_v2/mobilenet_v2.yml
- configs/mobilenet_v3/mobilenet_v3.yml
- configs/nonlocal_net/nonlocal_net.yml
- configs/ocrnet/ocrnet.yml
- configs/point_rend/point_rend.yml
- configs/poolformer/poolformer.yml
- configs/psanet/psanet.yml
- configs/pspnet/pspnet.yml
- configs/resnest/resnest.yml
- configs/segformer/segformer.yml
- configs/segmenter/segmenter.yml
- configs/segnext/segnext.yml
- configs/sem_fpn/sem_fpn.yml
- configs/setr/setr.yml
- configs/stdc/stdc.yml
- configs/swin/swin.yml
- configs/twins/twins.yml
- configs/unet/unet.yml
- configs/upernet/upernet.yml
- configs/vit/vit.yml
- configs/imagenets/imagenets.yml
| 1,374 | 27.061224 | 41 | yml |
mmsegmentation | mmsegmentation-master/setup.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import platform
import shutil
import sys
import warnings
from setuptools import find_packages, setup
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'mmseg/version.py'
def get_version():
with open(version_file, 'r') as f:
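        # execute mmseg/version.py so that __version__ is defined locally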
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
        with_version (bool, default=True): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import re
import sys
from os.path import exists
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
yield from parse_require_file(target)
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest
info['version'] = op, version
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
yield from parse_line(line)
def gen_packages_items():
if not exists(require_fpath):
return
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(f';{platform_deps}')
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
def add_mim_extension():
"""Add extra files that are required to support MIM into the package.
These files will be added by creating a symlink to the originals if the
package is installed in `editable` mode (e.g. pip install -e .), or by
copying from the originals otherwise.
"""
# parse installment mode
if 'develop' in sys.argv:
# installed by `pip install -e .`
# set `copy` mode here since symlink fails on Windows.
mode = 'copy' if platform.system() == 'Windows' else 'symlink'
elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv or platform.system(
) == 'Windows':
# installed by `pip install .`
# or create source distribution by `python setup.py sdist`
# set `copy` mode here since symlink fails with WinError on Windows.
mode = 'copy'
else:
return
filenames = ['tools', 'configs', 'model-index.yml']
repo_path = osp.dirname(__file__)
mim_path = osp.join(repo_path, 'mmseg', '.mim')
os.makedirs(mim_path, exist_ok=True)
for filename in filenames:
if osp.exists(filename):
src_path = osp.join(repo_path, filename)
tar_path = osp.join(mim_path, filename)
if osp.isfile(tar_path) or osp.islink(tar_path):
os.remove(tar_path)
elif osp.isdir(tar_path):
shutil.rmtree(tar_path)
if mode == 'symlink':
src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
try:
os.symlink(src_relpath, tar_path)
except OSError:
# Creating a symbolic link on windows may raise an
# `OSError: [WinError 1314]` due to privilege. If
# the error happens, the src file will be copied
mode = 'copy'
warnings.warn(
f'Failed to create a symbolic link for {src_relpath},'
f' and it will be copied to {tar_path}')
else:
continue
if mode != 'copy':
raise ValueError(f'Invalid mode {mode}')
if osp.isfile(src_path):
shutil.copyfile(src_path, tar_path)
elif osp.isdir(src_path):
shutil.copytree(src_path, tar_path)
else:
warnings.warn(f'Cannot copy file {src_path}.')
if __name__ == '__main__':
add_mim_extension()
setup(
name='mmsegmentation',
version=get_version(),
description='Open MMLab Semantic Segmentation Toolbox and Benchmark',
long_description=readme(),
long_description_content_type='text/markdown',
author='MMSegmentation Contributors',
author_email='[email protected]',
keywords='computer vision, semantic segmentation',
url='http://github.com/open-mmlab/mmsegmentation',
packages=find_packages(exclude=('configs', 'tools', 'demo')),
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
license='Apache License 2.0',
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
'build': parse_requirements('requirements/build.txt'),
'optional': parse_requirements('requirements/optional.txt'),
'mim': parse_requirements('requirements/mminstall.txt'),
},
ext_modules=[],
zip_safe=False)
| 7,221 | 36.811518 | 125 | py |
mmsegmentation | mmsegmentation-master/.circleci/config.yml | version: 2.1
# this allows you to use CircleCI's dynamic configuration feature
setup: true
# the path-filtering orb is required to continue a pipeline based on
# the path of an updated fileset
orbs:
path-filtering: circleci/[email protected]
workflows:
# the always-run workflow is always triggered, regardless of the pipeline parameters.
always-run:
jobs:
# the path-filtering/filter job determines which pipeline
# parameters to update.
- path-filtering/filter:
name: check-updated-files
# 3-column, whitespace-delimited mapping. One mapping per
# line:
# <regex path-to-test> <parameter-to-set> <value-of-pipeline-parameter>
mapping: |
mmseg/.* lint_only false
requirements/.* lint_only false
tests/.* lint_only false
tools/.* lint_only false
configs/.* lint_only false
.circleci/.* lint_only false
base-revision: master
# this is the path of the configuration we should trigger once
# path filtering and pipeline parameter value updates are
# complete. In this case, we are using the parent dynamic
# configuration itself.
config-path: .circleci/test.yml
| 1,275 | 35.457143 | 87 | yml |
mmsegmentation | mmsegmentation-master/.circleci/test.yml |
version: 2.1
# the default pipeline parameters, which will be updated according to
# the results of the path-filtering orb
parameters:
lint_only:
type: boolean
default: true
jobs:
lint:
docker:
- image: cimg/python:3.7.4
steps:
- checkout
- run:
name: Install dependencies
command: |
sudo apt-add-repository ppa:brightbox/ruby-ng -y
sudo apt-get update
sudo apt-get install -y ruby2.7
- run:
name: Install pre-commit hook
command: |
pip install pre-commit
pre-commit install
- run:
name: Linting
command: pre-commit run --all-files
- run:
name: Check docstring coverage
command: |
pip install interrogate
interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 50 mmseg
build_cpu:
parameters:
# The python version must match available image tags in
# https://circleci.com/developer/images/image/cimg/python
python:
type: string
default: "3.7.4"
torch:
type: string
torchvision:
type: string
docker:
- image: cimg/python:<< parameters.python >>
resource_class: large
steps:
- checkout
- run:
name: Get MMCV_TORCH as environment variables
command: |
. .circleci/scripts/get_mmcv_var.sh << parameters.torch >>
source $BASH_ENV
- run:
name: Install Libraries
command: |
sudo apt-get update
sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5
- run:
name: Configure Python & pip
command: |
python -m pip install --upgrade pip
python -m pip install wheel
- run:
name: Install PyTorch
command: |
python -V
python -m pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
- run:
name: Install mmseg dependencies
command: |
python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch<< parameters.torch >>/index.html
python -m pip install -r requirements.txt
python -m pip install albumentations>=0.3.2 --no-binary qudida,albumentations
- run:
name: Build and install
command: |
python -m pip install -e .
- run:
name: Run unittests but skip timm unittests
command: |
python -m coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py
python -m coverage xml
python -m coverage report -m
build_cuda:
parameters:
torch:
type: string
cuda:
type: enum
enum: ["10.1", "10.2", "11.1"]
cudnn:
type: integer
default: 7
machine:
image: ubuntu-2004-cuda-11.4:202110-01
docker_layer_caching: true
resource_class: gpu.nvidia.small
steps:
- checkout
- run:
name: Get MMCV_TORCH and MMCV_CUDA as environment variables
command: |
. .circleci/scripts/get_mmcv_var.sh << parameters.torch >> << parameters.cuda >>
source $BASH_ENV
- run:
name: Build Docker image
command: |
docker build .circleci/docker -t mmseg:gpu --build-arg PYTORCH=<< parameters.torch >> --build-arg CUDA=<< parameters.cuda >> --build-arg CUDNN=<< parameters.cudnn >>
docker run --gpus all -t -d -v /home/circleci/project:/mmseg -w /mmseg --name mmseg mmseg:gpu
- run:
name: Install mmseg dependencies
command: |
docker exec mmseg pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/${MMCV_CUDA}/torch${MMCV_TORCH}/index.html
docker exec mmseg pip install -r requirements.txt
docker exec mmseg pip install typing-extensions -U
docker exec mmseg pip install albumentations --use-pep517 qudida albumentations
- run:
name: Build and install
command: |
docker exec mmseg pip install -e .
- run:
name: Run unittests but skip timm unittests
command: |
docker exec mmseg python -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py
workflows:
pr_stage_lint:
when: << pipeline.parameters.lint_only >>
jobs:
- lint:
name: lint
filters:
branches:
ignore:
- master
pr_stage_test:
when:
not:
<< pipeline.parameters.lint_only >>
jobs:
- lint:
name: lint
filters:
branches:
ignore:
- master
- build_cpu:
name: minimum_version_cpu
torch: 1.6.0
torchvision: 0.7.0
python: 3.7.7
requires:
- lint
- build_cpu:
name: maximum_version_cpu
torch: 1.9.0
torchvision: 0.10.0
python: 3.8.0
requires:
- minimum_version_cpu
- hold:
type: approval
requires:
- maximum_version_cpu
- build_cuda:
name: mainstream_version_gpu
torch: 1.8.1
# Use double quotation mark to explicitly specify its type
# as string instead of number
cuda: "10.2"
requires:
- hold
merge_stage_test:
when:
not:
<< pipeline.parameters.lint_only >>
jobs:
- build_cuda:
name: minimum_version_gpu
torch: 1.6.0
# Use double quotation mark to explicitly specify its type
# as string instead of number
cuda: "10.1"
filters:
branches:
only:
- master
| 6,184 | 30.556122 | 177 | yml |
mmsegmentation | mmsegmentation-master/.circleci/scripts/get_mmcv_var.sh | #!/bin/bash
TORCH=$1
CUDA=$2
# 10.2 -> cu102
MMCV_CUDA="cu`echo ${CUDA} | tr -d '.'`"
# MMCV only provides pre-compiled packages for torch 1.x.0
# which works for any subversions of torch 1.x.
# We force the torch version to be 1.x.0 to ease package searching
# and avoid unnecessary rebuild during MMCV's installation.
TORCH_VER_ARR=(${TORCH//./ })
TORCH_VER_ARR[2]=0
printf -v MMCV_TORCH "%s." "${TORCH_VER_ARR[@]}"
MMCV_TORCH=${MMCV_TORCH%?} # Remove the last dot
echo "export MMCV_CUDA=${MMCV_CUDA}" >> $BASH_ENV
echo "export MMCV_TORCH=${MMCV_TORCH}" >> $BASH_ENV
| 574 | 27.75 | 66 | sh |
mmsegmentation | mmsegmentation-master/.dev/batch_test_list.py | # yapf: disable
# Inference Speed is tested on NVIDIA V100
hrnet = [
dict(
config='configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py',
checkpoint='fcn_hr18s_512x512_160k_ade20k_20200614_214413-870f65ac.pth', # noqa
eval='mIoU',
metric=dict(mIoU=33.0),
),
dict(
config='configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py',
checkpoint='fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth', # noqa
eval='mIoU',
metric=dict(mIoU=76.31),
),
dict(
config='configs/hrnet/fcn_hr48_512x512_160k_ade20k.py',
checkpoint='fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth',
eval='mIoU',
metric=dict(mIoU=42.02),
),
dict(
config='configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py',
checkpoint='fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth', # noqa
eval='mIoU',
metric=dict(mIoU=80.65),
),
]
pspnet = [
dict(
config='configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py',
checkpoint='pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth', # noqa
eval='mIoU',
metric=dict(mIoU=78.55),
),
dict(
config='configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py',
checkpoint='pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth', # noqa
eval='mIoU',
metric=dict(mIoU=79.76),
),
dict(
config='configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py',
checkpoint='pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth', # noqa
eval='mIoU',
metric=dict(mIoU=44.39),
),
dict(
config='configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py',
checkpoint='pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth', # noqa
eval='mIoU',
metric=dict(mIoU=42.48),
),
]
resnest = [
dict(
config='configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py',
checkpoint='pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth', # noqa
eval='mIoU',
metric=dict(mIoU=45.44),
),
dict(
config='configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py',
checkpoint='pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth', # noqa
eval='mIoU',
metric=dict(mIoU=78.57),
),
]
fastscnn = [
dict(
config='configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py',
checkpoint='fast_scnn_8x4_160k_lr0.12_cityscapes-0cec9937.pth',
eval='mIoU',
metric=dict(mIoU=70.96),
)
]
deeplabv3plus = [
dict(
config='configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py', # noqa
checkpoint='deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405-a7573d20.pth', # noqa
eval='mIoU',
metric=dict(mIoU=80.98),
),
dict(
config='configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py', # noqa
checkpoint='deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth', # noqa
eval='mIoU',
metric=dict(mIoU=80.97),
),
dict(
config='configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py', # noqa
checkpoint='deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth', # noqa
eval='mIoU',
metric=dict(mIoU=80.09),
),
dict(
config='configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py', # noqa
checkpoint='deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth', # noqa
eval='mIoU',
metric=dict(mIoU=79.83),
),
]
vit = [
dict(
config='configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py',
checkpoint='upernet_vit-b16_ln_mln_512x512_160k_ade20k-f444c077.pth',
eval='mIoU',
metric=dict(mIoU=47.73),
),
dict(
config='configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py',
checkpoint='upernet_deit-s16_ln_mln_512x512_160k_ade20k-c0cd652f.pth',
eval='mIoU',
metric=dict(mIoU=43.52),
),
]
fp16 = [
dict(
config='configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py', # noqa
checkpoint='deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920-f1104f4b.pth', # noqa
eval='mIoU',
metric=dict(mIoU=80.46),
)
]
swin = [
dict(
config='configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py', # noqa
checkpoint='upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', # noqa
eval='mIoU',
metric=dict(mIoU=44.41),
)
]
# yapf: enable
| 4,856 | 35.246269 | 130 | py |
mmsegmentation | mmsegmentation-master/.dev/benchmark_evaluation.sh | PARTITION=$1
CHECKPOINT_DIR=$2
echo 'configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr18s_512x512_160k_ade20k configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py $CHECKPOINT_DIR/fcn_hr18s_512x512_160k_ade20k_20200614_214413-870f65ac.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr18s_512x512_160k_ade20k --cfg-options dist_params.port=28171 &
echo 'configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr18s_512x1024_160k_cityscapes configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py $CHECKPOINT_DIR/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr18s_512x1024_160k_cityscapes --cfg-options dist_params.port=28172 &
echo 'configs/hrnet/fcn_hr48_512x512_160k_ade20k.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr48_512x512_160k_ade20k configs/hrnet/fcn_hr48_512x512_160k_ade20k.py $CHECKPOINT_DIR/fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr48_512x512_160k_ade20k --cfg-options dist_params.port=28173 &
echo 'configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr48_512x1024_160k_cityscapes configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py $CHECKPOINT_DIR/fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr48_512x1024_160k_cityscapes --cfg-options dist_params.port=28174 &
echo 'configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r50-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r50-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28175 &
echo 'configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r101-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r101-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28176 &
echo 'configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r101-d8_512x512_160k_ade20k configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py $CHECKPOINT_DIR/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r101-d8_512x512_160k_ade20k --cfg-options dist_params.port=28177 &
echo 'configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r50-d8_512x512_160k_ade20k configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py $CHECKPOINT_DIR/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r50-d8_512x512_160k_ade20k --cfg-options dist_params.port=28178 &
echo 'configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_s101-d8_512x512_160k_ade20k configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py $CHECKPOINT_DIR/pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_s101-d8_512x512_160k_ade20k --cfg-options dist_params.port=28179 &
echo 'configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_s101-d8_512x1024_80k_cityscapes configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_s101-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28180 &
echo 'configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fast_scnn_lr0.12_8x4_160k_cityscapes configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py $CHECKPOINT_DIR/fast_scnn_8x4_160k_lr0.12_cityscapes-0cec9937.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fast_scnn_lr0.12_8x4_160k_cityscapes --cfg-options dist_params.port=28181 &
echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r101-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405-a7573d20.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r101-d8_769x769_80k_cityscapes --cfg-options dist_params.port=28182 &
echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r101-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r101-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28183 &
echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r50-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r50-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28184 &
echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r50-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r50-d8_769x769_80k_cityscapes --cfg-options dist_params.port=28185 &
echo 'configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION upernet_vit-b16_ln_mln_512x512_160k_ade20k configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py $CHECKPOINT_DIR/upernet_vit-b16_ln_mln_512x512_160k_ade20k-f444c077.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/upernet_vit-b16_ln_mln_512x512_160k_ade20k --cfg-options dist_params.port=28186 &
echo 'configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION upernet_deit-s16_ln_mln_512x512_160k_ade20k configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py $CHECKPOINT_DIR/upernet_deit-s16_ln_mln_512x512_160k_ade20k-c0cd652f.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/upernet_deit-s16_ln_mln_512x512_160k_ade20k --cfg-options dist_params.port=28187 &
echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes-cc58bc8d.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes --cfg-options dist_params.port=28188 &
echo 'configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py $CHECKPOINT_DIR/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K --cfg-options dist_params.port=28189 &
| 8,747 | 207.285714 | 517 | sh |
mmsegmentation | mmsegmentation-master/.dev/benchmark_inference.py | # Copyright (c) OpenMMLab. All rights reserved.
import hashlib
import logging
import os
import os.path as osp
import warnings
from argparse import ArgumentParser
import requests
from mmcv import Config
from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
from mmseg.utils import get_root_logger
# ignore warnings when segmentors inference
warnings.filterwarnings('ignore')
def download_checkpoint(checkpoint_name, model_name, config_name, collect_dir):
"""Download checkpoint and check if hash code is true."""
url = f'https://download.openmmlab.com/mmsegmentation/v0.5/{model_name}/{config_name}/{checkpoint_name}' # noqa
r = requests.get(url)
assert r.status_code != 403, f'{url} Access denied.'
with open(osp.join(collect_dir, checkpoint_name), 'wb') as code:
code.write(r.content)
true_hash_code = osp.splitext(checkpoint_name)[0].split('-')[1]
# check hash code
with open(osp.join(collect_dir, checkpoint_name), 'rb') as fp:
sha256_cal = hashlib.sha256()
sha256_cal.update(fp.read())
cur_hash_code = sha256_cal.hexdigest()[:8]
    if cur_hash_code != true_hash_code:
        # remove the incomplete download before flagging the failure
        os.remove(osp.join(collect_dir, checkpoint_name))
    assert true_hash_code == cur_hash_code, (
        f'{url} download failed, incomplete downloaded file or url invalid.')
def parse_args():
parser = ArgumentParser()
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument(
'-i', '--img', default='demo/demo.png', help='Image file')
parser.add_argument('-a', '--aug', action='store_true', help='aug test')
parser.add_argument('-m', '--model-name', help='model name to inference')
parser.add_argument(
'-s', '--show', action='store_true', help='show results')
parser.add_argument(
'-d', '--device', default='cuda:0', help='Device used for inference')
return parser.parse_args()
def inference_model(config_name, checkpoint, args, logger=None):
cfg = Config.fromfile(config_name)
if args.aug:
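        # multi-scale + flip test-time augmentation, enabled only when the
        # test pipeline exposes the expected fields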
if 'flip' in cfg.data.test.pipeline[
1] and 'img_scale' in cfg.data.test.pipeline[1]:
cfg.data.test.pipeline[1].img_ratios = [
0.5, 0.75, 1.0, 1.25, 1.5, 1.75
]
cfg.data.test.pipeline[1].flip = True
elif logger is None:
print(f'{config_name}: unable to start aug test', flush=True)
else:
logger.error(f'{config_name}: unable to start aug test')
model = init_segmentor(cfg, checkpoint, device=args.device)
# test a single image
result = inference_segmentor(model, args.img)
# show the results
if args.show:
show_result_pyplot(model, args.img, result)
return result
# Sample test whether the inference code is correct
def main(args):
config = Config.fromfile(args.config)
if not os.path.exists(args.checkpoint_root):
os.makedirs(args.checkpoint_root, 0o775)
# test single model
if args.model_name:
if args.model_name in config:
model_infos = config[args.model_name]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
config_name = model_info['config'].strip()
print(f'processing: {config_name}', flush=True)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, args)
except Exception:
print(f'{config_name} test failed!')
continue
return
else:
raise RuntimeError('model name input error.')
# test all model
logger = get_root_logger(
log_file='benchmark_inference_image.log', log_level=logging.ERROR)
for model_name in config:
model_infos = config[model_name]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'], flush=True)
config_path = model_info['config'].strip()
config_name = osp.splitext(osp.basename(config_path))[0]
checkpoint_name = model_info['checkpoint'].strip()
checkpoint = osp.join(args.checkpoint_root, checkpoint_name)
# ensure checkpoint exists
try:
if not osp.exists(checkpoint):
                    # config_name already had its '.py' suffix stripped by
                    # os.path.splitext, so pass it through unchanged
                    download_checkpoint(checkpoint_name, model_name,
                                        config_name, args.checkpoint_root)
except Exception:
logger.error(f'{checkpoint_name} download error')
continue
# test model inference with checkpoint
try:
# build the model from a config file and a checkpoint file
inference_model(config_path, checkpoint, args, logger)
except Exception as e:
logger.error(f'{config_path} " : {repr(e)}')
if __name__ == '__main__':
args = parse_args()
main(args)
| 5,429 | 35.689189 | 116 | py |
mmsegmentation | mmsegmentation-master/.dev/benchmark_train.sh | PARTITION=$1
echo 'configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr18s_512x512_160k_ade20k configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24727 --work-dir work_dirs/hrnet/fcn_hr18s_512x512_160k_ade20k >/dev/null &
echo 'configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr18s_512x1024_160k_cityscapes configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24728 --work-dir work_dirs/hrnet/fcn_hr18s_512x1024_160k_cityscapes >/dev/null &
echo 'configs/hrnet/fcn_hr48_512x512_160k_ade20k.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr48_512x512_160k_ade20k configs/hrnet/fcn_hr48_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24729 --work-dir work_dirs/hrnet/fcn_hr48_512x512_160k_ade20k >/dev/null &
echo 'configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr48_512x1024_160k_cityscapes configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24730 --work-dir work_dirs/hrnet/fcn_hr48_512x1024_160k_cityscapes >/dev/null &
echo 'configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r50-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24731 --work-dir work_dirs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes >/dev/null &
echo 'configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r101-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24732 --work-dir work_dirs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes >/dev/null &
echo 'configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r101-d8_512x512_160k_ade20k configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24733 --work-dir work_dirs/pspnet/pspnet_r101-d8_512x512_160k_ade20k >/dev/null &
echo 'configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r50-d8_512x512_160k_ade20k configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24734 --work-dir work_dirs/pspnet/pspnet_r50-d8_512x512_160k_ade20k >/dev/null &
echo 'configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_s101-d8_512x512_160k_ade20k configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24735 --work-dir work_dirs/resnest/pspnet_s101-d8_512x512_160k_ade20k >/dev/null &
echo 'configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_s101-d8_512x1024_80k_cityscapes configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24736 --work-dir work_dirs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes >/dev/null &
echo 'configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fast_scnn_lr0.12_8x4_160k_cityscapes configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24737 --work-dir work_dirs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes >/dev/null &
echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r101-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24738 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes >/dev/null &
echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r101-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24739 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes >/dev/null &
echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r50-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24740 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes >/dev/null &
echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r50-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24741 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes >/dev/null &
echo 'configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION upernet_vit-b16_ln_mln_512x512_160k_ade20k configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24742 --work-dir work_dirs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k >/dev/null &
echo 'configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION upernet_deit-s16_ln_mln_512x512_160k_ade20k configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24743 --work-dir work_dirs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k >/dev/null &
echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24744 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes >/dev/null &
echo 'configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24745 --work-dir work_dirs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K >/dev/null &
| 7,626 | 185.02439 | 420 | sh |
mmsegmentation | mmsegmentation-master/.dev/check_urls.py | # Copyright (c) OpenMMLab. All rights reserved.
import logging
import os
from argparse import ArgumentParser
import requests
import yaml as yml
from mmseg.utils import get_root_logger
def check_url(url):
"""Check url response status.
Args:
url (str): url needed to check.
Returns:
int, bool: status code and check flag.
"""
r = requests.head(url)
status_code = r.status_code
flag = status_code not in [403, 404]
return status_code, flag
def parse_args():
parser = ArgumentParser('url valid check.')
parser.add_argument(
'-m',
'--model-name',
type=str,
help='Select the model needed to check')
return parser.parse_args()
def main():
args = parse_args()
model_name = args.model_name
# yml path generate.
# If model_name is not set, script will check all of the models.
if model_name is not None:
yml_list = [(model_name, f'configs/{model_name}/{model_name}.yml')]
else:
# check all
yml_list = [(x, f'configs/{x}/{x}.yml') for x in os.listdir('configs/')
if x != '_base_']
logger = get_root_logger(log_file='url_check.log', log_level=logging.ERROR)
for model_name, yml_path in yml_list:
# Default yaml loader unsafe.
model_infos = yml.load(
open(yml_path, 'r'), Loader=yml.CLoader)['Models']
for model_info in model_infos:
config_name = model_info['Name']
checkpoint_url = model_info['Weights']
# checkpoint url check
status_code, flag = check_url(checkpoint_url)
if flag:
                logger.info(f'checkpoint | {config_name} | {checkpoint_url} | '
                            f'{status_code} | valid')
else:
logger.error(
f'checkpoint | {config_name} | {checkpoint_url} | '
f'{status_code} | error')
# log_json check
checkpoint_name = checkpoint_url.split('/')[-1]
model_time = '-'.join(checkpoint_name.split('-')[:-1]).replace(
f'{config_name}_', '')
# two style of log_json name
# use '_' to link model_time (will be deprecated)
log_json_url_1 = f'https://download.openmmlab.com/mmsegmentation/v0.5/{model_name}/{config_name}/{config_name}_{model_time}.log.json' # noqa
status_code_1, flag_1 = check_url(log_json_url_1)
# use '-' to link model_time
log_json_url_2 = f'https://download.openmmlab.com/mmsegmentation/v0.5/{model_name}/{config_name}/{config_name}-{model_time}.log.json' # noqa
status_code_2, flag_2 = check_url(log_json_url_2)
if flag_1 or flag_2:
if flag_1:
logger.info(
f'log.json | {config_name} | {log_json_url_1} | '
f'{status_code_1} | valid')
else:
logger.info(
f'log.json | {config_name} | {log_json_url_2} | '
f'{status_code_2} | valid')
else:
logger.error(
f'log.json | {config_name} | {log_json_url_1} & '
f'{log_json_url_2} | {status_code_1} & {status_code_2} | '
'error')
if __name__ == '__main__':
main()
| 3,392 | 33.622449 | 153 | py |
mmsegmentation | mmsegmentation-master/.dev/gather_benchmark_evaluation_results.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import mmcv
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Gather benchmarked model evaluation results')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'--out',
type=str,
default='benchmark_evaluation_info.json',
help='output path of gathered metrics and compared '
'results to be stored')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
root_path = args.root
metrics_out = args.out
result_dict = {}
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
previous_metrics = model_info['metric']
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
# Load benchmark evaluation json
metric_json_dir = osp.join(root_path, fname)
if not osp.exists(metric_json_dir):
print(f'{metric_json_dir} not existed.')
continue
json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
if len(json_list) == 0:
print(f'There is no eval json in {metric_json_dir}.')
continue
log_json_path = list(sorted(json_list))[-1]
metric = mmcv.load(log_json_path)
if config not in metric.get('config', {}):
print(f'{config} not included in {log_json_path}')
continue
# Compare between new benchmark results and previous metrics
differential_results = {}
new_metrics = {}
for record_metric_key in previous_metrics:
if record_metric_key not in metric['metric']:
raise KeyError('record_metric_key not exist, please '
'check your config')
old_metric = previous_metrics[record_metric_key]
new_metric = round(metric['metric'][record_metric_key] * 100,
2)
differential = new_metric - old_metric
flag = '+' if differential > 0 else '-'
differential_results[
record_metric_key] = f'{flag}{abs(differential):.2f}'
new_metrics[record_metric_key] = new_metric
result_dict[config] = dict(
differential=differential_results,
previous=previous_metrics,
new=new_metrics)
if metrics_out:
mmcv.dump(result_dict, metrics_out, indent=4)
print('===================================')
for config_name, metrics in result_dict.items():
print(config_name, metrics)
print('===================================')
| 3,183 | 33.608696 | 77 | py |
mmsegmentation | mmsegmentation-master/.dev/gather_benchmark_train_results.py | import argparse
import glob
import os.path as osp
import mmcv
from gather_models import get_final_results
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Gather benchmarked models train results')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'--out',
type=str,
default='benchmark_train_info.json',
help='output path of gathered metrics to be stored')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
root_path = args.root
metrics_out = args.out
evaluation_cfg = Config.fromfile(args.config)
result_dict = {}
for model_key in evaluation_cfg:
model_infos = evaluation_cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
config = model_info['config']
# benchmark train dir
model_name = osp.split(osp.dirname(config))[1]
config_name = osp.splitext(osp.basename(config))[0]
exp_dir = osp.join(root_path, model_name, config_name)
if not osp.exists(exp_dir):
print(f'{config} hasn\'t {exp_dir}')
continue
# parse config
cfg = mmcv.Config.fromfile(config)
total_iters = cfg.runner.max_iters
exp_metric = cfg.evaluation.metric
            if not isinstance(exp_metric, list):
                exp_metric = [exp_metric]
# determine whether total_iters ckpt exists
ckpt_path = f'iter_{total_iters}.pth'
if not osp.exists(osp.join(exp_dir, ckpt_path)):
print(f'{config} hasn\'t {ckpt_path}')
continue
# only the last log json counts
log_json_path = list(
sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]
# extract metric value
model_performance = get_final_results(log_json_path, total_iters)
if model_performance is None:
print(f'log file error: {log_json_path}')
continue
differential_results = {}
old_results = {}
new_results = {}
for metric_key in model_performance:
if metric_key in ['mIoU']:
metric = round(model_performance[metric_key] * 100, 2)
old_metric = model_info['metric'][metric_key]
old_results[metric_key] = old_metric
new_results[metric_key] = metric
differential = metric - old_metric
flag = '+' if differential > 0 else '-'
differential_results[
metric_key] = f'{flag}{abs(differential):.2f}'
result_dict[config] = dict(
differential_results=differential_results,
old_results=old_results,
new_results=new_results,
)
    # save or print results
if metrics_out:
mmcv.dump(result_dict, metrics_out, indent=4)
print('===================================')
for config_name, metrics in result_dict.items():
print(config_name, metrics)
print('===================================')
| 3,481 | 33.475248 | 77 | py |
mmsegmentation | mmsegmentation-master/.dev/gather_models.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import hashlib
import json
import os
import os.path as osp
import shutil
import mmcv
import torch
# build schedule look-up table to automatically find the final model
RESULTS_LUT = ['mIoU', 'mAcc', 'aAcc']
def calculate_file_sha256(file_path):
"""calculate file sha256 hash code."""
with open(file_path, 'rb') as fp:
sha256_cal = hashlib.sha256()
sha256_cal.update(fp.read())
return sha256_cal.hexdigest()
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
torch.save(checkpoint, out_file)
# The hash code calculation and rename command differ on different system
# platform.
sha = calculate_file_sha256(out_file)
    # use splitext to drop the '.pth' suffix; str.rstrip would strip a
    # character set ('.', 'p', 't', 'h') rather than the literal suffix
    final_file = osp.splitext(out_file)[0] + f'-{sha[:8]}.pth'
os.rename(out_file, final_file)
# Remove prefix and suffix
final_file_name = osp.split(final_file)[1]
final_file_name = osp.splitext(final_file_name)[0]
return final_file_name
def get_final_iter(config):
iter_num = config.split('_')[-2]
if iter_num.endswith('k'):
return int(iter_num[:-1]) * 1000
else:
return int(iter_num)
def get_final_results(log_json_path, iter_num):
result_dict = {}
last_iter = 0
with open(log_json_path, 'r') as f:
for line in f:
log_line = json.loads(line)
if 'mode' not in log_line.keys():
continue
# When evaluation, the 'iter' of new log json is the evaluation
# steps on single gpu.
flag1 = 'aAcc' in log_line or log_line['mode'] == 'val'
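            # flag2: the immediately preceding train log entry sits at the
            # final iteration, or 50 iterations before it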
flag2 = last_iter in [iter_num - 50, iter_num]
if flag1 and flag2:
result_dict.update({
key: log_line[key]
for key in RESULTS_LUT if key in log_line
})
return result_dict
last_iter = log_line['iter']
def parse_args():
parser = argparse.ArgumentParser(description='Gather benchmarked models')
parser.add_argument(
'-f', '--config-name', type=str, help='Process the selected config.')
parser.add_argument(
'-w',
'--work-dir',
default='work_dirs/',
type=str,
help='Ckpt storage root folder of benchmarked models to be gathered.')
parser.add_argument(
'-c',
'--collect-dir',
default='work_dirs/gather',
type=str,
help='Ckpt collect root folder of gathered models.')
parser.add_argument(
'--all', action='store_true', help='whether include .py and .log')
args = parser.parse_args()
return args
def main():
args = parse_args()
work_dir = args.work_dir
collect_dir = args.collect_dir
selected_config_name = args.config_name
mmcv.mkdir_or_exist(collect_dir)
# find all models in the root directory to be gathered
raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True))
# filter configs that is not trained in the experiments dir
used_configs = []
for raw_config in raw_configs:
config_name = osp.splitext(osp.basename(raw_config))[0]
if osp.exists(osp.join(work_dir, config_name)):
if (selected_config_name is None
or selected_config_name == config_name):
used_configs.append(raw_config)
print(f'Find {len(used_configs)} models to be gathered')
    # find the final checkpoint and log file for each trained config
# and parse the best performance
model_infos = []
for used_config in used_configs:
config_name = osp.splitext(osp.basename(used_config))[0]
exp_dir = osp.join(work_dir, config_name)
        # check whether the experiment is finished
final_iter = get_final_iter(used_config)
final_model = f'iter_{final_iter}.pth'
model_path = osp.join(exp_dir, final_model)
# skip if the model is still training
if not osp.exists(model_path):
print(f'{used_config} train not finished yet')
continue
# get logs
log_json_paths = glob.glob(osp.join(exp_dir, '*.log.json'))
log_json_path = log_json_paths[0]
model_performance = None
for _log_json_path in log_json_paths:
model_performance = get_final_results(_log_json_path, final_iter)
if model_performance is not None:
log_json_path = _log_json_path
break
if model_performance is None:
print(f'{used_config} model_performance is None')
continue
model_time = osp.split(log_json_path)[-1].split('.')[0]
model_infos.append(
dict(
config_name=config_name,
results=model_performance,
iters=final_iter,
model_time=model_time,
log_json_path=osp.split(log_json_path)[-1]))
# publish model for each checkpoint
publish_model_infos = []
for model in model_infos:
config_name = model['config_name']
model_publish_dir = osp.join(collect_dir, config_name)
publish_model_path = osp.join(model_publish_dir,
f'{config_name}_' + model['model_time'])
trained_model_path = osp.join(work_dir, config_name,
f'iter_{model["iters"]}.pth')
if osp.exists(model_publish_dir):
for file in os.listdir(model_publish_dir):
if file.endswith('.pth'):
print(f'model {file} found')
model['model_path'] = osp.abspath(
osp.join(model_publish_dir, file))
break
if 'model_path' not in model:
print(f'dir {model_publish_dir} exists, no model found')
else:
mmcv.mkdir_or_exist(model_publish_dir)
# convert model
final_model_path = process_checkpoint(trained_model_path,
publish_model_path)
model['model_path'] = final_model_path
new_json_path = f'{config_name}_{model["log_json_path"]}'
# copy log
shutil.copy(
osp.join(work_dir, config_name, model['log_json_path']),
osp.join(model_publish_dir, new_json_path))
if args.all:
            new_txt_path = osp.splitext(new_json_path)[0]
            shutil.copy(
                osp.join(work_dir, config_name,
                         osp.splitext(model['log_json_path'])[0]),
                osp.join(model_publish_dir, new_txt_path))
if args.all:
# copy config to guarantee reproducibility
raw_config = osp.join('./configs', f'{config_name}.py')
mmcv.Config.fromfile(raw_config).dump(
osp.join(model_publish_dir, osp.basename(raw_config)))
publish_model_infos.append(model)
models = dict(models=publish_model_infos)
mmcv.dump(models, osp.join(collect_dir, 'model_infos.json'), indent=4)
if __name__ == '__main__':
main()
| 7,368 | 33.596244 | 78 | py |
mmsegmentation | mmsegmentation-master/.dev/generate_benchmark_evaluation_script.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from mmcv import Config
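# Example invocation (the input config path is hypothetical; the config should
# define entries with 'config', 'checkpoint' and 'eval' fields):
#   python .dev/generate_benchmark_evaluation_script.py \
#       path/to/model_list.py --out .dev/benchmark_evaluation.sh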
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark test model list to script')
parser.add_argument('config', help='test config file path')
parser.add_argument('--port', type=int, default=28171, help='dist port')
parser.add_argument(
'--work-dir',
default='work_dirs/benchmark_evaluation',
help='the dir to save metric')
parser.add_argument(
'--out',
type=str,
default='.dev/benchmark_evaluation.sh',
help='path to save model benchmark script')
return parser.parse_args()
def process_model_info(model_info, work_dir):
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
job_name = fname
checkpoint = model_info['checkpoint'].strip()
work_dir = osp.join(work_dir, fname)
    evals = model_info['eval'] if isinstance(model_info['eval'],
                                             list) else [model_info['eval']]
    eval_metrics = ' '.join(evals)
    return dict(
        config=config,
        job_name=job_name,
        checkpoint=checkpoint,
        work_dir=work_dir,
        eval=eval_metrics)
def create_test_bash_info(commands, model_test_dict, port, script_name,
partition):
config = model_test_dict['config']
job_name = model_test_dict['job_name']
checkpoint = model_test_dict['checkpoint']
work_dir = model_test_dict['work_dir']
    eval_metrics = model_test_dict['eval']
echo_info = f'\necho \'{config}\' &'
commands.append(echo_info)
commands.append('\n')
command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \
f'CPUS_PER_TASK=2 {script_name} '
command_info += f'{partition} '
command_info += f'{job_name} '
command_info += f'{config} '
command_info += f'$CHECKPOINT_DIR/{checkpoint} '
    command_info += f'--eval {eval_metrics} '
command_info += f'--work-dir {work_dir} '
command_info += f'--cfg-options dist_params.port={port} '
command_info += '&'
commands.append(command_info)
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
            f'Expected out file path suffix to be .sh, but got .{out_suffix}'
commands = []
partition_name = 'PARTITION=$1'
commands.append(partition_name)
commands.append('\n')
checkpoint_root = 'CHECKPOINT_DIR=$2'
commands.append(checkpoint_root)
commands.append('\n')
script_name = osp.join('tools', 'slurm_test.sh')
port = args.port
work_dir = args.work_dir
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'])
model_test_dict = process_model_info(model_info, work_dir)
create_test_bash_info(commands, model_test_dict, port, script_name,
'$PARTITION')
port += 1
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str + '\n')
if __name__ == '__main__':
main()
| 3,381 | 28.929204 | 79 | py |
mmsegmentation | mmsegmentation-master/.dev/generate_benchmark_train_script.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
# Train with 4 GPUs by default; the configs listed below use 8 GPUs.
config_8gpu_list = [
'configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py', # noqa
'configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py',
'configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py',
]
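# Example invocation (the txt file lists one config path per line and is
# produced by a benchmark filter step; the path is illustrative):
#   python .dev/generate_benchmark_train_script.py path/to/benchmark_train.txt \
#       --out .dev/benchmark_train.sh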
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model json to script')
parser.add_argument(
'txt_path', type=str, help='txt path output by benchmark_filter')
parser.add_argument('--port', type=int, default=24727, help='dist port')
parser.add_argument(
'--out',
type=str,
default='.dev/benchmark_train.sh',
help='path to save model benchmark script')
args = parser.parse_args()
return args
def create_train_bash_info(commands, config, script_name, partition, port):
cfg = config.strip()
# print cfg name
echo_info = f'echo \'{cfg}\' &'
commands.append(echo_info)
commands.append('\n')
_, model_name = osp.split(osp.dirname(cfg))
config_name, _ = osp.splitext(osp.basename(cfg))
# default setting
if cfg in config_8gpu_list:
command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {script_name} '
else:
command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \
f'CPUS_PER_TASK=2 {script_name} '
command_info += f'{partition} '
command_info += f'{config_name} '
command_info += f'{cfg} '
command_info += f'--cfg-options ' \
f'checkpoint_config.max_keep_ckpts=1 ' \
f'dist_params.port={port} '
command_info += f'--work-dir work_dirs/{model_name}/{config_name} '
    # Suppress the command's stdout
command_info += '>/dev/null &'
commands.append(command_info)
commands.append('\n')
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
            f'Expected out file path suffix to be .sh, but got .{out_suffix}'
root_name = './tools'
script_name = osp.join(root_name, 'slurm_train.sh')
port = args.port
partition_name = 'PARTITION=$1'
commands = [partition_name, '\n', '\n']
with open(args.txt_path, 'r') as f:
model_cfgs = f.readlines()
for cfg in model_cfgs:
create_train_bash_info(commands, cfg, script_name, '$PARTITION',
port)
port += 1
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if __name__ == '__main__':
main()
| 2,770 | 30.134831 | 103 | py |
mmsegmentation | mmsegmentation-master/.dev/md2yml.py | #!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
# This tool is used to update model-index.yml which is required by MIM, and
# will be automatically called as a pre-commit hook. The updating will be
# triggered if any change of model information (.md files in configs/) has been
# detected before a commit.
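# Besides running as a pre-commit hook, it can be invoked manually on selected
# README files, e.g.:
#   python .dev/md2yml.py configs/pspnet/README.md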
import glob
import os
import os.path as osp
import re
import sys
from lxml import etree
from mmcv.fileio import dump
MMSEG_ROOT = osp.dirname(osp.dirname((osp.dirname(__file__))))
COLLECTIONS = [
'ANN', 'APCNet', 'BiSeNetV1', 'BiSeNetV2', 'CCNet', 'CGNet', 'DANet',
'DeepLabV3', 'DeepLabV3+', 'DMNet', 'DNLNet', 'DPT', 'EMANet', 'EncNet',
'ERFNet', 'FastFCN', 'FastSCNN', 'FCN', 'GCNet', 'ICNet', 'ISANet', 'KNet',
'NonLocalNet', 'OCRNet', 'PointRend', 'PSANet', 'PSPNet', 'Segformer',
'Segmenter', 'FPN', 'SETR', 'STDC', 'UNet', 'UPerNet'
]
COLLECTIONS_TEMP = []
def dump_yaml_and_check_difference(obj, filename, sort_keys=False):
"""Dump object to a yaml file, and check if the file content is different
from the original.
Args:
obj (any): The python object to be dumped.
filename (str): YAML filename to dump the object to.
        sort_keys (bool): Whether to sort keys in dictionary order.
Returns:
Bool: If the target YAML file is different from the original.
"""
str_dump = dump(obj, None, file_format='yaml', sort_keys=sort_keys)
if osp.isfile(filename):
file_exists = True
with open(filename, 'r', encoding='utf-8') as f:
str_orig = f.read()
else:
file_exists = False
str_orig = None
if file_exists and str_orig == str_dump:
is_different = False
else:
is_different = True
with open(filename, 'w', encoding='utf-8') as f:
f.write(str_dump)
return is_different
def parse_md(md_file):
"""Parse .md file and convert it to a .yml file which can be used for MIM.
Args:
md_file (str): Path to .md file.
Returns:
Bool: If the target YAML file is different from the original.
"""
collection_name = osp.split(osp.dirname(md_file))[1]
configs = os.listdir(osp.dirname(md_file))
collection = dict(
Name=collection_name,
Metadata={'Training Data': []},
Paper={
'URL': '',
'Title': ''
},
README=md_file,
Code={
'URL': '',
'Version': ''
})
collection.update({'Converted From': {'Weights': '', 'Code': ''}})
models = []
datasets = []
paper_url = None
paper_title = None
code_url = None
code_version = None
repo_url = None
    # To avoid re-counting the number of backbone models in OpenMMLab:
    # if a model in the configs folder is a backbone whose name is already
    # recorded in MMClassification, the `COLLECTION` dict of this model
    # in MMSegmentation should be deleted, and `In Collection` in `Models`
    # should be set to the head or neck of this config file.
is_backbone = None
with open(md_file, 'r', encoding='UTF-8') as md:
lines = md.readlines()
i = 0
current_dataset = ''
while i < len(lines):
line = lines[i].strip()
            # In the latest README.md, the title and url are on line 3.
if i == 2:
paper_url = lines[i].split('](')[1].split(')')[0]
paper_title = lines[i].split('](')[0].split('[')[1]
if len(line) == 0:
i += 1
continue
elif line[:3] == '<a ':
content = etree.HTML(line)
node = content.xpath('//a')[0]
if node.text == 'Code Snippet':
code_url = node.get('href', None)
assert code_url is not None, (
                        f'{collection_name} has no code snippet url.')
# version extraction
filter_str = r'blob/(.*)/mm'
pattern = re.compile(filter_str)
code_version = pattern.findall(code_url)
assert len(code_version) == 1, (
                        f'incorrect regular expression ({filter_str}) usage.')
code_version = code_version[0]
elif node.text == 'Official Repo':
repo_url = node.get('href', None)
assert repo_url is not None, (
                        f'{collection_name} has no official repo url.')
i += 1
elif line[:4] == '### ':
datasets.append(line[4:])
current_dataset = line[4:]
i += 2
elif line[:15] == '<!-- [BACKBONE]':
is_backbone = True
i += 1
elif (line[0] == '|' and (i + 1) < len(lines)
and lines[i + 1][:3] == '| -' and 'Method' in line
and 'Crop Size' in line and 'Mem (GB)' in line):
cols = [col.strip() for col in line.split('|')]
method_id = cols.index('Method')
backbone_id = cols.index('Backbone')
crop_size_id = cols.index('Crop Size')
lr_schd_id = cols.index('Lr schd')
mem_id = cols.index('Mem (GB)')
fps_id = cols.index('Inf time (fps)')
try:
ss_id = cols.index('mIoU')
except ValueError:
ss_id = cols.index('Dice')
try:
ms_id = cols.index('mIoU(ms+flip)')
except ValueError:
ms_id = False
config_id = cols.index('config')
download_id = cols.index('download')
j = i + 2
while j < len(lines) and lines[j][0] == '|':
els = [el.strip() for el in lines[j].split('|')]
config = ''
model_name = ''
weight = ''
for fn in configs:
if fn in els[config_id]:
left = els[download_id].index(
'https://download.openmmlab.com')
right = els[download_id].index('.pth') + 4
weight = els[download_id][left:right]
config = f'configs/{collection_name}/{fn}'
model_name = fn[:-3]
fps = els[fps_id] if els[fps_id] != '-' and els[
fps_id] != '' else -1
mem = els[mem_id].split(
'\\'
)[0] if els[mem_id] != '-' and els[mem_id] != '' else -1
crop_size = els[crop_size_id].split('x')
assert len(crop_size) == 2
method = els[method_id].split()[0].split('-')[-1]
model = {
'Name':
model_name,
'In Collection':
method,
'Metadata': {
'backbone': els[backbone_id],
'crop size': f'({crop_size[0]},{crop_size[1]})',
'lr schd': int(els[lr_schd_id]),
},
'Results': [
{
'Task': 'Semantic Segmentation',
'Dataset': current_dataset,
'Metrics': {
cols[ss_id]: float(els[ss_id]),
},
},
],
'Config':
config,
'Weights':
weight,
}
if fps != -1:
try:
fps = float(fps)
except Exception:
j += 1
continue
model['Metadata']['inference time (ms/im)'] = [{
'value':
round(1000 / float(fps), 2),
'hardware':
'V100',
'backend':
'PyTorch',
'batch size':
1,
'mode':
'FP32' if 'fp16' not in config else 'FP16',
'resolution':
f'({crop_size[0]},{crop_size[1]})'
}]
if mem != -1:
model['Metadata']['Training Memory (GB)'] = float(mem)
# Only have semantic segmentation now
if ms_id and els[ms_id] != '-' and els[ms_id] != '':
model['Results'][0]['Metrics'][
'mIoU(ms+flip)'] = float(els[ms_id])
models.append(model)
j += 1
i = j
else:
i += 1
flag = (code_url is not None) and (paper_url is not None) and (repo_url
is not None)
assert flag, f'{collection_name} readme error'
collection['Name'] = method
collection['Metadata']['Training Data'] = datasets
collection['Code']['URL'] = code_url
collection['Code']['Version'] = code_version
collection['Paper']['URL'] = paper_url
collection['Paper']['Title'] = paper_title
collection['Converted From']['Code'] = repo_url
    # ['Converted From']['Weights'] is missing
# remove empty attribute
check_key_list = ['Code', 'Paper', 'Converted From']
for check_key in check_key_list:
key_list = list(collection[check_key].keys())
for key in key_list:
if check_key not in collection:
break
if collection[check_key][key] == '':
if len(collection[check_key].keys()) == 1:
collection.pop(check_key)
else:
collection[check_key].pop(key)
yml_file = f'{md_file[:-9]}{collection_name}.yml'
if is_backbone:
if collection['Name'] not in COLLECTIONS:
result = {
'Collections': [collection],
'Models': models,
'Yml': yml_file
}
COLLECTIONS_TEMP.append(result)
return False
else:
result = {'Models': models}
else:
COLLECTIONS.append(collection['Name'])
result = {'Collections': [collection], 'Models': models}
return dump_yaml_and_check_difference(result, yml_file)
def update_model_index():
"""Update model-index.yml according to model .md files.
Returns:
Bool: If the updated model-index.yml is different from the original.
"""
configs_dir = osp.join(MMSEG_ROOT, 'configs')
yml_files = glob.glob(osp.join(configs_dir, '**', '*.yml'), recursive=True)
yml_files.sort()
# add .replace('\\', '/') to avoid Windows Style path
model_index = {
'Import': [
osp.relpath(yml_file, MMSEG_ROOT).replace('\\', '/')
for yml_file in yml_files
]
}
model_index_file = osp.join(MMSEG_ROOT, 'model-index.yml')
is_different = dump_yaml_and_check_difference(model_index,
model_index_file)
return is_different
if __name__ == '__main__':
file_list = [fn for fn in sys.argv[1:] if osp.basename(fn) == 'README.md']
if not file_list:
sys.exit(0)
file_modified = False
for fn in file_list:
file_modified |= parse_md(fn)
for result in COLLECTIONS_TEMP:
collection = result['Collections'][0]
yml_file = result.pop('Yml', None)
if collection['Name'] in COLLECTIONS:
result.pop('Collections')
file_modified |= dump_yaml_and_check_difference(result, yml_file)
file_modified |= update_model_index()
sys.exit(1 if file_modified else 0)
| 12,306 | 37.701258 | 79 | py |
mmsegmentation | mmsegmentation-master/.dev/upload_modelzoo.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import oss2
ACCESS_KEY_ID = os.getenv('OSS_ACCESS_KEY_ID', None)
ACCESS_KEY_SECRET = os.getenv('OSS_ACCESS_KEY_SECRET', None)
BUCKET_NAME = 'openmmlab'
ENDPOINT = 'https://oss-accelerate.aliyuncs.com'
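# The OSS credentials are read from the environment, e.g. (values are
# placeholders):
#   export OSS_ACCESS_KEY_ID=xxx OSS_ACCESS_KEY_SECRET=yyy
#   python .dev/upload_modelzoo.py work_dirs/gather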
def parse_args():
parser = argparse.ArgumentParser(description='Upload models to OSS')
parser.add_argument('model_zoo', type=str, help='model_zoo input')
parser.add_argument(
'--dst-folder',
type=str,
default='mmsegmentation/v0.5',
help='destination folder')
return parser.parse_args()
def main():
args = parse_args()
model_zoo = args.model_zoo
dst_folder = args.dst_folder
bucket = oss2.Bucket(
oss2.Auth(ACCESS_KEY_ID, ACCESS_KEY_SECRET), ENDPOINT, BUCKET_NAME)
for root, dirs, files in os.walk(model_zoo):
for file in files:
file_path = osp.relpath(osp.join(root, file), model_zoo)
print(f'Uploading {file_path}')
oss2.resumable_upload(bucket, osp.join(dst_folder, file_path),
osp.join(model_zoo, file_path))
bucket.put_object_acl(
osp.join(dst_folder, file_path), oss2.OBJECT_ACL_PUBLIC_READ)
if __name__ == '__main__':
main()
| 1,324 | 28.444444 | 77 | py |
mmsegmentation | mmsegmentation-master/.dev/log_collector/example_config.py | work_dir = '../../work_dirs'
metric = 'mIoU'
# specify the log files we would like to collect in `log_items`
log_items = [
'segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup',
'segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr',
'segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr',
'segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr'
]
# or specify ignore_keywords, then the folders whose names contain
# `'segformer'` won't be collected
# ignore_keywords = ['segformer']
# should not include metric
other_info_keys = ['mAcc']
markdown_file = 'markdowns/lr_in_trans.json.md'
json_file = 'jsons/trans_in_cnn.json'
| 641 | 32.789474 | 65 | py |
mmsegmentation | mmsegmentation-master/.dev/log_collector/log_collector.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import datetime
import json
import os
import os.path as osp
from collections import OrderedDict
from utils import load_config
# automatically collect all the results
# The structure of the directory:
# ├── work-dir
# │ ├── config_1
# │ │ ├── time1.log.json
# │ │ ├── time2.log.json
# │ │ ├── time3.log.json
# │ │ ├── time4.log.json
# │ ├── config_2
# │ │ ├── time5.log.json
# │ │ ├── time6.log.json
# │ │ ├── time7.log.json
# │ │ ├── time8.log.json
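# Run with a config file such as example_config.py in this folder:
#   python log_collector.py ./example_config.py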
def parse_args():
parser = argparse.ArgumentParser(description='extract info from log.json')
parser.add_argument('config_dir')
return parser.parse_args()
def has_keyword(name: str, keywords: list):
return any(a_keyword in name for a_keyword in keywords)
def main():
args = parse_args()
cfg = load_config(args.config_dir)
work_dir = cfg['work_dir']
metric = cfg['metric']
log_items = cfg.get('log_items', [])
ignore_keywords = cfg.get('ignore_keywords', [])
other_info_keys = cfg.get('other_info_keys', [])
markdown_file = cfg.get('markdown_file', None)
json_file = cfg.get('json_file', None)
if json_file and osp.split(json_file)[0] != '':
os.makedirs(osp.split(json_file)[0], exist_ok=True)
if markdown_file and osp.split(markdown_file)[0] != '':
os.makedirs(osp.split(markdown_file)[0], exist_ok=True)
assert not (log_items and ignore_keywords), \
'log_items and ignore_keywords cannot be specified at the same time'
assert metric not in other_info_keys, \
'other_info_keys should not contain metric'
if ignore_keywords and isinstance(ignore_keywords, str):
ignore_keywords = [ignore_keywords]
if other_info_keys and isinstance(other_info_keys, str):
other_info_keys = [other_info_keys]
if log_items and isinstance(log_items, str):
log_items = [log_items]
if not log_items:
log_items = [
item for item in sorted(os.listdir(work_dir))
if not has_keyword(item, ignore_keywords)
]
experiment_info_list = []
for config_dir in log_items:
preceding_path = os.path.join(work_dir, config_dir)
log_list = [
item for item in os.listdir(preceding_path)
if item.endswith('.log.json')
]
log_list = sorted(
log_list,
key=lambda time_str: datetime.datetime.strptime(
time_str, '%Y%m%d_%H%M%S.log.json'))
val_list = []
last_iter = 0
for log_name in log_list:
with open(os.path.join(preceding_path, log_name), 'r') as f:
# ignore the info line
f.readline()
all_lines = f.readlines()
val_list.extend([
json.loads(line) for line in all_lines
if json.loads(line)['mode'] == 'val'
])
for index in range(len(all_lines) - 1, -1, -1):
line_dict = json.loads(all_lines[index])
if line_dict['mode'] == 'train':
last_iter = max(last_iter, line_dict['iter'])
break
new_log_dict = dict(
method=config_dir, metric_used=metric, last_iter=last_iter)
for index, log in enumerate(val_list, 1):
new_ordered_dict = OrderedDict()
new_ordered_dict['eval_index'] = index
new_ordered_dict[metric] = log[metric]
for key in other_info_keys:
if key in log:
new_ordered_dict[key] = log[key]
val_list[index - 1] = new_ordered_dict
assert len(val_list) >= 1, \
f"work dir {config_dir} doesn't contain any evaluation."
new_log_dict['last eval'] = val_list[-1]
new_log_dict['best eval'] = max(val_list, key=lambda x: x[metric])
experiment_info_list.append(new_log_dict)
print(f'{config_dir} is processed')
if json_file:
with open(json_file, 'w') as f:
json.dump(experiment_info_list, f, indent=4)
if markdown_file:
lines_to_write = []
for index, log in enumerate(experiment_info_list, 1):
lines_to_write.append(
f"|{index}|{log['method']}|{log['best eval'][metric]}"
f"|{log['best eval']['eval_index']}|"
f"{log['last eval'][metric]}|"
f"{log['last eval']['eval_index']}|{log['last_iter']}|\n")
with open(markdown_file, 'w') as f:
f.write(f'|exp_num|method|{metric} best|best index|'
f'{metric} last|last index|last iter num|\n')
f.write('|:---:|:---:|:---:|:---:|:---:|:---:|:---:|\n')
f.writelines(lines_to_write)
print('processed successfully')
if __name__ == '__main__':
main()
| 4,962 | 34.45 | 78 | py |
mmsegmentation | mmsegmentation-master/.dev/log_collector/readme.md | # Log Collector
## Function
Automatically collect logs and write the result in a json file or markdown file.
If there are several `.log.json` files in one folder, Log Collector assumes that each `.log.json` file after the first one resumes from the preceding `.log.json` file. Log Collector returns the result considering all `.log.json` files.
## Usage:
To use log collector, you need to write a config file to configure the log collector first.
For example:
example_config.py:
```python
# The work directory that contains folders that contains .log.json files.
work_dir = '../../work_dirs'
# The metric used to find the best evaluation.
metric = 'mIoU'
# **Don't specify the log_items and ignore_keywords at the same time.**
# Specify the log files we would like to collect in `log_items`.
# The folders specified should be the subdirectories of `work_dir`.
log_items = [
'segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup',
'segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr',
'segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr',
'segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr'
]
# Or specify `ignore_keywords`. The folders whose names contain one
# of the keywords in the `ignore_keywords` list (e.g., `'segformer'`)
# won't be collected.
# ignore_keywords = ['segformer']
# Other log items in .log.json that you want to collect.
# They should not include the metric.
other_info_keys = ["mAcc"]
# The output markdown file's name.
markdown_file = 'markdowns/lr_in_trans.json.md'
# The output json file's name. (optional)
json_file = 'jsons/trans_in_cnn.json'
```
The structure of the work-dir directory should be like:
```text
├── work-dir
│ ├── folder1
│ │ ├── time1.log.json
│ │ ├── time2.log.json
│ │ ├── time3.log.json
│ │ ├── time4.log.json
│ ├── folder2
│ │ ├── time5.log.json
│ │ ├── time6.log.json
│ │ ├── time7.log.json
│ │ ├── time8.log.json
```
Then, cd to the log collector folder.
Now you can run log_collector.py with the command:
```bash
python log_collector.py ./example_config.py
```
The output markdown file is like:
| exp_num | method | mIoU best | best index | mIoU last | last index | last iter num |
| :-----: | :-----------------------------------------------------: | :-------: | :--------: | :-------: | :--------: | :-----------: |
| 1 | segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup | 0.2776 | 10 | 0.2776 | 10 | 160000 |
| 2 | segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr | 0.2802 | 10 | 0.2802 | 10 | 160000 |
| 3 | segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr | 0.4943 | 11 | 0.4943 | 11 | 160000 |
| 4 | segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr | 0.4883 | 11 | 0.4883 | 11 | 160000 |
The output json file is like:
```json
[
{
"method": "segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup",
"metric_used": "mIoU",
"last_iter": 160000,
"last eval": {
"eval_index": 10,
"mIoU": 0.2776,
"mAcc": 0.3779
},
"best eval": {
"eval_index": 10,
"mIoU": 0.2776,
"mAcc": 0.3779
}
},
{
"method": "segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr",
"metric_used": "mIoU",
"last_iter": 160000,
"last eval": {
"eval_index": 10,
"mIoU": 0.2802,
"mAcc": 0.3764
},
"best eval": {
"eval_index": 10,
"mIoU": 0.2802,
"mAcc": 0.3764
}
},
{
"method": "segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr",
"metric_used": "mIoU",
"last_iter": 160000,
"last eval": {
"eval_index": 11,
"mIoU": 0.4943,
"mAcc": 0.6097
},
"best eval": {
"eval_index": 11,
"mIoU": 0.4943,
"mAcc": 0.6097
}
},
{
"method": "segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr",
"metric_used": "mIoU",
"last_iter": 160000,
"last eval": {
"eval_index": 11,
"mIoU": 0.4883,
"mAcc": 0.6061
},
"best eval": {
"eval_index": 11,
"mIoU": 0.4883,
"mAcc": 0.6061
}
}
]
```
| 4,517 | 30.158621 | 243 | md |
mmsegmentation | mmsegmentation-master/.dev/log_collector/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
# modified from https://github.dev/open-mmlab/mmcv
import os.path as osp
import sys
from importlib import import_module
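# Load a plain Python config file into a dict of its top-level variables,
# e.g. load_config('./example_config.py') -> {'work_dir': ..., 'metric': ...}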
def load_config(cfg_dir: str) -> dict:
assert cfg_dir.endswith('.py')
root_path, file_name = osp.split(cfg_dir)
temp_module = osp.splitext(file_name)[0]
sys.path.insert(0, root_path)
mod = import_module(temp_module)
sys.path.pop(0)
cfg_dict = {
k: v
for k, v in mod.__dict__.items() if not k.startswith('__')
}
del sys.modules[temp_module]
return cfg_dict
| 582 | 26.761905 | 66 | py |
mmsegmentation | mmsegmentation-master/.github/CODE_OF_CONDUCT.md | # Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
- The use of sexualized language or imagery and unwelcome sexual attention or
advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic
address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at [email protected]. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
[homepage]: https://www.contributor-covenant.org
| 3,355 | 42.584416 | 87 | md |
mmsegmentation | mmsegmentation-master/.github/CONTRIBUTING.md | # Contributing to mmsegmentation
All kinds of contributions are welcome, including but not limited to the following.
- Fixes (typo, bugs)
- New features and components
## Workflow
1. fork and pull the latest mmsegmentation
2. checkout a new branch (do not use master branch for PRs)
3. commit your changes
4. create a PR
:::{note}
- If you plan to add some new features that involve large changes, it is encouraged to open an issue for discussion first.
- If you are the author of some papers and would like to include your method to mmsegmentation,
please contact Kai Chen (chenkaidev\[at\]gmail\[dot\]com). We will much appreciate your contribution.
:::
## Code style
### Python
We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style.
We use the following tools for linting and formatting:
- [flake8](http://flake8.pycqa.org/en/latest/): linter
- [yapf](https://github.com/google/yapf): formatter
- [isort](https://github.com/timothycrosley/isort): sort imports
Style configurations of yapf and isort can be found in [setup.cfg](../setup.cfg) and [.isort.cfg](../.isort.cfg).
We use a [pre-commit hook](https://pre-commit.com/) that, on every commit, checks and formats the code with `flake8`, `yapf` and `isort`, trims trailing whitespace,
fixes end-of-file newlines, and sorts `requirements.txt` automatically.
The config for a pre-commit hook is stored in [.pre-commit-config](../.pre-commit-config.yaml).
After you clone the repository, you will need to install and initialize the pre-commit hook.
```shell
pip install -U pre-commit
```
From the repository folder
```shell
pre-commit install
```
After this, the code linters and formatter will run on every commit.
> Before you create a PR, make sure that your code lints and is formatted by yapf.
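If you want to run the linters and the formatter by hand before committing, the following commands work in a typical checkout (a minimal sketch; paths may need adjusting for your setup):
```shell
flake8 mmseg tests
isort mmseg tests
yapf -r -i mmseg tests
```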
### C++ and CUDA
We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
| 1,889 | 31.033898 | 128 | md |
mmsegmentation | mmsegmentation-master/.github/pull_request_template.md | Thanks for your contribution and we appreciate it a lot. The following instructions would make your pull request more healthy and more easily get feedback. If you do not understand some items, don't worry, just make the pull request and seek help from maintainers.
## Motivation
Please describe the motivation of this PR and the goal you want to achieve through this PR.
## Modification
Please briefly describe what modification is made in this PR.
## BC-breaking (Optional)
Does the modification introduce changes that break the backward-compatibility of the downstream repos?
If so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR.
## Use cases (Optional)
If this PR introduces a new feature, it is better to list some use cases here, and update the documentation.
## Checklist
1. Pre-commit or other linting tools are used to fix the potential lint issues.
2. The modification is covered by complete unit tests. If not, please add more unit test to ensure the correctness.
3. If the modification has potential influence on downstream projects, this PR should be tested with downstream projects, like MMDet or MMDet3D.
4. The documentation has been modified accordingly, like docstring or example tutorials.
| 1,312 | 49.5 | 264 | md |
mmsegmentation | mmsegmentation-master/.github/ISSUE_TEMPLATE/config.yml | blank_issues_enabled: false
contact_links:
- name: MMSegmentation Documentation
url: https://mmsegmentation.readthedocs.io
    about: Check the docs and FAQ to see if your question is already answered.
| 208 | 28.857143 | 77 | yml |
mmsegmentation | mmsegmentation-master/.github/ISSUE_TEMPLATE/error-report.md | ---
name: Error report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
Thanks for your error report and we appreciate it a lot.
**Checklist**
1. I have searched related issues but cannot get the expected help.
2. The bug has not been fixed in the latest version.
**Describe the bug**
A clear and concise description of what the bug is.
**Reproduction**
1. What command or script did you run?
```none
A placeholder for the command.
```
2. Did you make any modifications to the code or config? Did you understand what you have modified?
3. What dataset did you use?
**Environment**
1. Please run `python mmseg/utils/collect_env.py` to collect necessary environment information and paste it here.
2. You may add addition that may be helpful for locating the problem, such as
- How you installed PyTorch \[e.g., pip, conda, source\]
- Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
**Error traceback**
If applicable, paste the error traceback here.
```none
A placeholder for trackback.
```
**Bug fix**
If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
| 1,317 | 25.897959 | 194 | md |
mmsegmentation | mmsegmentation-master/.github/ISSUE_TEMPLATE/feature_request.md | ---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
# Describe the feature
**Motivation**
A clear and concise description of the motivation of the feature.
Ex1. It is inconvenient when \[....\].
Ex2. There is a recent paper \[....\], which is very helpful for \[....\].
**Related resources**
If there is an official code release or there are third-party implementations, please also provide the information here, which would be very helpful.
**Additional context**
Add any other context or screenshots about the feature request here.
If you would like to implement the feature and create a PR, please leave a comment here and that would be much appreciated.
| 705 | 31.090909 | 139 | md |
mmsegmentation | mmsegmentation-master/.github/ISSUE_TEMPLATE/general_questions.md | ---
name: General questions
about: Ask general questions to get help
title: ''
labels: ''
assignees: ''
---
| 108 | 12.625 | 40 | md |
mmsegmentation | mmsegmentation-master/.github/ISSUE_TEMPLATE/reimplementation_questions.md | ---
name: Reimplementation Questions
about: Ask about questions during model reimplementation
title: ''
labels: reimplementation
assignees: ''
---
If you feel we have helped you, give us a STAR! :satisfied:
**Notice**
There are several common situations in the reimplementation issues as below
1. Reimplement a model in the model zoo using the provided configs
2. Reimplement a model in the model zoo on other datasets (e.g., custom datasets)
3. Reimplement a custom model but all the components are implemented in MMSegmentation
4. Reimplement a custom model with new modules implemented by yourself
There are several things to do for different cases as below.
- For cases 1 & 3, please follow the steps in the following sections thus we could help to quickly identify the issue.
- For cases 2 & 4, please understand that we are not able to do much help here because we usually do not know the full code, and the users should be responsible for the code they write.
- One suggestion for cases 2 & 4 is that the users should first check whether the bug lies in the self-implemented code or the original code. For example, users can first make sure that the same model runs well on supported datasets. If you still need help, please describe what you have done and what you obtain in the issue, and follow the steps in the following sections, and try as clear as possible so that we can better help you.
**Checklist**
1. I have searched related issues but cannot get the expected help.
2. The issue has not been fixed in the latest version.
**Describe the issue**
A clear and concise description of the problem you meet and what you have done.
**Reproduction**
1. What command or script did you run?
```
A placeholder for the command.
```
2. What config dir you run?
```
A placeholder for the config.
```
3. Did you make any modifications to the code or config? Did you understand what you have modified?
4. What dataset did you use?
**Environment**
1. Please run `PYTHONPATH=${PWD}:$PYTHONPATH python mmseg/utils/collect_env.py` to collect the necessary environment information and paste it here.
2. You may add additional information that may be helpful for locating the problem, such as
1. How you installed PyTorch \[e.g., pip, conda, source\]
2. Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
**Results**
If applicable, paste the related results here, e.g., what you expect and what you get.
```
A placeholder for results comparison
```
**Issue fix**
If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
| 2,728 | 37.985714 | 435 | md |
mmsegmentation | mmsegmentation-master/.github/workflows/build.yml | name: build
on:
push:
paths-ignore:
- 'demo/**'
- '.dev/**'
- 'docker/**'
- 'tools/**'
- '**.md'
- 'projects/**'
pull_request:
paths-ignore:
- 'demo/**'
- '.dev/**'
- 'docker/**'
- 'tools/**'
- 'docs/**'
- '**.md'
- 'projects/**'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_cpu:
runs-on: ubuntu-18.04
strategy:
matrix:
python-version: [3.7]
torch: [1.5.1, 1.6.0, 1.7.0, 1.8.0, 1.9.0]
include:
- torch: 1.5.1
torch_version: torch1.5
torchvision: 0.6.1
- torch: 1.6.0
torch_version: torch1.6
torchvision: 0.7.0
- torch: 1.7.0
torch_version: torch1.7
torchvision: 0.8.1
- torch: 1.8.0
torch_version: torch1.8
torchvision: 0.9.0
- torch: 1.9.0
torch_version: torch1.9
torchvision: 0.10.0
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Upgrade pip
run: pip install pip --upgrade
- name: Install PyTorch
run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
- name: Install MMCV
run: |
pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/${{matrix.torch_version}}/index.html
python -c 'import mmcv; print(mmcv.__version__)'
- name: Install unittest dependencies
run: |
pip install -r requirements.txt
pip install albumentations>=0.3.2 --no-binary qudida,albumentations
- name: Build and install
run: rm -rf .eggs && pip install -e .
- name: Run unittests and generate coverage report
run: |
pip install timm
coverage run --branch --source mmseg -m pytest tests/
coverage xml
coverage report -m
# timm from v0.6.11 requires torch>=1.7
if: ${{matrix.torch != '1.5.1' && matrix.torch != '1.6.0'}}
- name: Skip timm unittests and generate coverage report
run: |
coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py
coverage xml
coverage report -m
if: ${{matrix.torch == '1.5.1' || matrix.torch == '1.6.0'}}
build_cuda101:
runs-on: ubuntu-18.04
container:
image: pytorch/pytorch:1.6.0-cuda10.1-cudnn7-devel
strategy:
matrix:
python-version: [3.7]
torch:
[
1.5.1+cu101,
1.6.0+cu101,
1.7.0+cu101,
1.8.0+cu101
]
include:
- torch: 1.5.1+cu101
torch_version: torch1.5
torchvision: 0.6.1+cu101
- torch: 1.6.0+cu101
torch_version: torch1.6
torchvision: 0.7.0+cu101
- torch: 1.7.0+cu101
torch_version: torch1.7
torchvision: 0.8.1+cu101
- torch: 1.8.0+cu101
torch_version: torch1.8
torchvision: 0.9.0+cu101
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Fetch GPG keys
run: |
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
- name: Install system dependencies
run: |
apt-get update && apt-get install -y libgl1-mesa-glx ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 python${{matrix.python-version}}-dev
apt-get clean
rm -rf /var/lib/apt/lists/*
- name: Install PyTorch
run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install mmseg dependencies
run: |
python -V
python -m pip install -U openmim
mim install mmcv-full
python -m pip install -r requirements.txt
python -m pip install albumentations>=0.3.2 --no-binary qudida,albumentations
python -c 'import mmcv; print(mmcv.__version__)'
- name: Build and install
run: |
rm -rf .eggs
python setup.py check -m -s
TORCH_CUDA_ARCH_LIST=7.0 pip install .
- name: Run unittests and generate coverage report
run: |
python -m pip install timm
coverage run --branch --source mmseg -m pytest tests/
coverage xml
coverage report -m
# timm from v0.6.11 requires torch>=1.7
if: ${{matrix.torch != '1.5.1+cu101' && matrix.torch != '1.6.0+cu101'}}
- name: Skip timm unittests and generate coverage report
run: |
coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py
coverage xml
coverage report -m
if: ${{matrix.torch == '1.5.1+cu101' || matrix.torch == '1.6.0+cu101'}}
- name: Upload coverage to Codecov
uses: codecov/[email protected]
with:
file: ./coverage.xml
flags: unittests
env_vars: OS,PYTHON
name: codecov-umbrella
fail_ci_if_error: false
build_cuda102:
env:
LC_ALL: C.UTF-8
LANG: C.UTF-8
runs-on: ubuntu-18.04
container:
image: pytorch/pytorch:1.9.0-cuda10.2-cudnn7-devel
strategy:
matrix:
python-version: [3.6, 3.7, 3.8, 3.9]
torch: [1.9.0+cu102]
include:
- torch: 1.9.0+cu102
torch_version: torch1.9
torchvision: 0.10.0+cu102
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Fetch GPG keys
run: |
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
- name: Install system dependencies
run: |
apt-get update && apt-get install -y libgl1-mesa-glx ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6
apt-get clean
rm -rf /var/lib/apt/lists/*
- name: Install PyTorch
run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install mmseg dependencies
run: |
python -V
python -m pip install openmim
mim install mmcv-full
python -m pip install -r requirements.txt
python -m pip install albumentations>=0.3.2 --no-binary qudida,albumentations
python -c 'import mmcv; print(mmcv.__version__)'
- name: Build and install
run: |
rm -rf .eggs
python setup.py check -m -s
TORCH_CUDA_ARCH_LIST=7.0 pip install .
- name: Run unittests and generate coverage report
run: |
python -m pip install timm
coverage run --branch --source mmseg -m pytest tests/
coverage xml
coverage report -m
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v2
with:
files: ./coverage.xml
flags: unittests
env_vars: OS,PYTHON
name: codecov-umbrella
fail_ci_if_error: false
build_cuda116:
runs-on: ubuntu-18.04
container:
image: pytorch/pytorch:1.13.0-cuda11.6-cudnn8-devel
env:
FORCE_CUDA: 1
MMCV_CUDA_ARGS: -gencode=arch=compute_61,code=sm_61
strategy:
matrix:
python-version: ['3.10']
torch: [1.13.0+cu116]
include:
- torch: 1.13.0+cu116
torchvision: 0.14.0+cu116
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Fetch GPG keys
run: |
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
- name: Add PPA
run: |
apt-get update && apt-get install -y software-properties-common
add-apt-repository -y ppa:deadsnakes/ppa
- name: Install python-dev
run: apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y python${{matrix.python-version}}-dev
- name: Install system dependencies
run: apt-get update && apt-get install -y ffmpeg libturbojpeg ninja-build
- name: Install PyTorch
run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install mmseg dependencies
run: |
python -V
python -m pip install openmim
mim install mmcv-full
python -m pip install -r requirements.txt
python -m pip install albumentations>=0.3.2 --no-binary qudida,albumentations
python -c 'import mmcv; print(mmcv.__version__)'
- name: Build and install
run: |
rm -rf .eggs
python setup.py check -m -s
TORCH_CUDA_ARCH_LIST=7.0 pip install .
- name: Run unittests and generate coverage report
run: |
python -m pip install timm
coverage run --branch --source mmseg -m pytest tests/
coverage xml
coverage report -m
test_windows:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [windows-2022]
python: [3.8]
platform: [cpu, cu111]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Upgrade pip
run: python -m pip install pip --upgrade --user
- name: Install OpenCV
run: pip install opencv-python>=3
- name: Install PyTorch
# As a complement to Linux CI, we test on PyTorch LTS version
run: pip install torch==1.8.2+${{ matrix.platform }} torchvision==0.9.2+${{ matrix.platform }} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
- name: Install MMCV
run: |
pip install -U openmim
mim install mmcv-full
- name: Install unittest dependencies
run: |
pip install -r requirements/tests.txt -r requirements/optional.txt
pip install albumentations>=0.3.2 --no-binary qudida,albumentations
- name: Build and install
run: pip install -e .
- name: Run unittests
run: |
python -m pip install timm
coverage run --branch --source mmseg -m pytest tests/
- name: Generate coverage report
run: |
coverage xml
coverage report -m
| 11,980 | 36.323988 | 182 | yml |
mmsegmentation | mmsegmentation-master/.github/workflows/deploy.yml | name: deploy
on: push
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build-n-publish:
runs-on: ubuntu-latest
if: startsWith(github.event.ref, 'refs/tags')
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.7
uses: actions/setup-python@v2
with:
python-version: 3.7
- name: Build MMSegmentation
run: |
pip install wheel
python setup.py sdist bdist_wheel
- name: Publish distribution to PyPI
run: |
pip install twine
twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }}
| 659 | 23.444444 | 74 | yml |
mmsegmentation | mmsegmentation-master/.github/workflows/lint.yml | name: lint
on: [push, pull_request]
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
lint:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.7
uses: actions/setup-python@v2
with:
python-version: 3.7
- name: Install pre-commit hook
run: |
pip install pre-commit
pre-commit install
- name: Linting
run: |
pre-commit run --all-files
- name: Check docstring coverage
run: |
pip install interrogate
interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --exclude mmseg/ops --ignore-regex "__repr__" --fail-under 80 mmseg
| 761 | 25.275862 | 155 | yml |
mmsegmentation | mmsegmentation-master/.github/workflows/test_mim.yml | name: test-mim
on:
push:
paths:
- 'model-index.yml'
- 'configs/**'
pull_request:
paths:
- 'model-index.yml'
- 'configs/**'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_cpu:
runs-on: ubuntu-18.04
strategy:
matrix:
python-version: [3.7]
torch: [1.8.0]
include:
- torch: 1.8.0
torch_version: torch1.8
torchvision: 0.9.0
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Upgrade pip
run: pip install pip --upgrade
- name: Install PyTorch
run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
- name: Install openmim
run: pip install openmim
- name: Build and install
run: rm -rf .eggs && mim install -e .
- name: test commands of mim
run: mim search mmsegmentation
| 1,156 | 24.711111 | 148 | yml |
mmsegmentation | mmsegmentation-master/configs/_base_/default_runtime.py | # yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook')
# dict(type='PaviLoggerHook') # for internal services
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
| 383 | 23 | 61 | py |
mmsegmentation | mmsegmentation-master/configs/_base_/datasets/ade20k.py | # dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=True),
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/training',
ann_dir='annotations/training',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline))
| 1,844 | 32.545455 | 77 | py |
mmsegmentation | mmsegmentation-master/configs/_base_/datasets/ade20k_640x640.py | # dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (640, 640)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=True),
dict(type='Resize', img_scale=(2560, 640), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2560, 640),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/training',
ann_dir='annotations/training',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline))
| 1,844 | 32.545455 | 77 | py |
mmsegmentation | mmsegmentation-master/configs/_base_/datasets/chase_db1.py | # dataset settings
dataset_type = 'ChaseDB1Dataset'
data_root = 'data/CHASE_DB1'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (960, 999)
crop_size = (128, 128)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type='RepeatDataset',
times=40000,
dataset=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/training',
ann_dir='annotations/training',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/cityscapes.py
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 1024)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/train',
ann_dir='gtFine/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/val',
ann_dir='gtFine/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/val',
ann_dir='gtFine/val',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/cityscapes_1024x1024.py
_base_ = './cityscapes.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (1024, 1024)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/cityscapes_768x768.py
_base_ = './cityscapes.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (768, 768)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2049, 1025),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/cityscapes_769x769.py
_base_ = './cityscapes.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (769, 769)
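# the odd (2^n)+1 sizes (769 crops, 2049x1025 scale) are conventionally
# paired with decode heads that set align_corners=True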
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2049, 1025),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/cityscapes_832x832.py
_base_ = './cityscapes.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (832, 832)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/coco-stuff10k.py
# dataset settings
dataset_type = 'COCOStuffDataset'
data_root = 'data/coco_stuff10k'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=True),
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
reduce_zero_label=True,
img_dir='images/train2014',
ann_dir='annotations/train2014',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
reduce_zero_label=True,
img_dir='images/test2014',
ann_dir='annotations/test2014',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
reduce_zero_label=True,
img_dir='images/test2014',
ann_dir='annotations/test2014',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/coco-stuff164k.py
# dataset settings
dataset_type = 'COCOStuffDataset'
data_root = 'data/coco_stuff164k'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/train2017',
ann_dir='annotations/train2017',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/val2017',
ann_dir='annotations/val2017',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/val2017',
ann_dir='annotations/val2017',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/drive.py
# dataset settings
dataset_type = 'DRIVEDataset'
data_root = 'data/DRIVE'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (584, 565)
crop_size = (64, 64)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type='RepeatDataset',
times=40000,
dataset=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/training',
ann_dir='annotations/training',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/hrf.py
# dataset settings
dataset_type = 'HRFDataset'
data_root = 'data/HRF'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (2336, 3504)
crop_size = (256, 256)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type='RepeatDataset',
times=40000,
dataset=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/training',
ann_dir='annotations/training',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/imagenets.py
# dataset settings
dataset_type = 'ImageNetSDataset'
subset = 919
data_root = 'data/ImageNetS/ImageNetS919'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (224, 224)
train_pipeline = [
dict(type='LoadImageNetSImageFromFile', downsample_large_image=True),
dict(type='LoadImageNetSAnnotations', reduce_zero_label=False),
dict(type='Resize', img_scale=(1024, 256), ratio_range=(0.5, 2.0)),
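    # ImageNet-S has up to 919 classes, so labels exceed the uint8 range and
    # the usual ignore value 255 cannot be used; 1000 serves as ignore instead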
dict(
type='RandomCrop',
crop_size=crop_size,
cat_max_ratio=0.75,
ignore_index=1000),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=1000),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageNetSImageFromFile', downsample_large_image=True),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 256),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
subset=subset,
data_root=data_root,
img_dir='train-semi',
ann_dir='train-semi-segmentation',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
subset=subset,
data_root=data_root,
img_dir='validation',
ann_dir='validation-segmentation',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
subset=subset,
data_root=data_root,
img_dir='validation',
ann_dir='validation-segmentation',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/isaid.py
# dataset settings
dataset_type = 'iSAIDDataset'
data_root = 'data/iSAID'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
"""
This crop_size setting is followed by the implementation of
`PointFlow: Flowing Semantics Through Points for Aerial Image
Segmentation <https://arxiv.org/pdf/2103.06564.pdf>`_.
"""
crop_size = (896, 896)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(896, 896), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(896, 896),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='img_dir/train',
ann_dir='ann_dir/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='img_dir/val',
ann_dir='ann_dir/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='img_dir/val',
ann_dir='ann_dir/val',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/loveda.py
# dataset settings
dataset_type = 'LoveDADataset'
data_root = 'data/loveDA'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=True),
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 1024),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='img_dir/train',
ann_dir='ann_dir/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='img_dir/val',
ann_dir='ann_dir/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='img_dir/val',
ann_dir='ann_dir/val',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/occlude_face.py
dataset_type = 'FaceOccludedDataset'
data_root = 'data/occlusion-aware-face-dataset'
crop_size = (512, 512)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(512, 512)),
dict(type='RandomFlip', prob=0.5),
dict(type='RandomRotate', degree=(-30, 30), prob=0.5),
dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='ResizeToMultiple', size_divisor=32),
dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
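# three occlusion-augmented training sources follow; passing them as a list to
# data['train'] below makes mmseg concatenate them into a single training set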
dataset_train_A = dict(
type=dataset_type,
data_root=data_root,
img_dir='NatOcc_hand_sot/img',
ann_dir='NatOcc_hand_sot/mask',
split='train.txt',
pipeline=train_pipeline)
dataset_train_B = dict(
type=dataset_type,
data_root=data_root,
img_dir='NatOcc_object/img',
ann_dir='NatOcc_object/mask',
split='train.txt',
pipeline=train_pipeline)
dataset_train_C = dict(
type=dataset_type,
data_root=data_root,
img_dir='RandOcc/img',
ann_dir='RandOcc/mask',
split='train.txt',
pipeline=train_pipeline)
dataset_valid = dict(
type=dataset_type,
data_root=data_root,
img_dir='RealOcc/image',
ann_dir='RealOcc/mask',
split='RealOcc/split/val.txt',
pipeline=test_pipeline)
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=[dataset_train_A, dataset_train_B, dataset_train_C],
val=dataset_valid)
# mmsegmentation-master/configs/_base_/datasets/pascal_context.py
# dataset settings
dataset_type = 'PascalContextDataset'
data_root = 'data/VOCdevkit/VOC2010/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (520, 520)
crop_size = (480, 480)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/train.txt',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/val.txt',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/val.txt',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/pascal_context_59.py
# dataset settings
dataset_type = 'PascalContextDataset59'
data_root = 'data/VOCdevkit/VOC2010/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (520, 520)
crop_size = (480, 480)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=True),
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/train.txt',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/val.txt',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/val.txt',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/pascal_voc12.py
# dataset settings
dataset_type = 'PascalVOCDataset'
data_root = 'data/VOCdevkit/VOC2012'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClass',
split='ImageSets/Segmentation/train.txt',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClass',
split='ImageSets/Segmentation/val.txt',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClass',
split='ImageSets/Segmentation/val.txt',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/pascal_voc12_aug.py
_base_ = './pascal_voc12.py'
# dataset settings
data = dict(
train=dict(
ann_dir=['SegmentationClass', 'SegmentationClassAug'],
split=[
'ImageSets/Segmentation/train.txt',
'ImageSets/Segmentation/aug.txt'
]))
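# Usage sketch (illustrative, not part of the original file): full configs
# consume these _base_ fragments through mmcv's Config machinery, e.g.
#   from mmcv import Config
#   cfg = Config.fromfile('configs/_base_/datasets/pascal_voc12_aug.py')
#   assert cfg.data.train.ann_dir == ['SegmentationClass', 'SegmentationClassAug']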
# mmsegmentation-master/configs/_base_/datasets/potsdam.py
# dataset settings
dataset_type = 'PotsdamDataset'
data_root = 'data/potsdam'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=True),
dict(type='Resize', img_scale=(512, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='img_dir/train',
ann_dir='ann_dir/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='img_dir/val',
ann_dir='ann_dir/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='img_dir/val',
ann_dir='ann_dir/val',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/stare.py
# dataset settings
dataset_type = 'STAREDataset'
data_root = 'data/STARE'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (605, 700)
crop_size = (128, 128)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type='RepeatDataset',
times=40000,
dataset=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/training',
ann_dir='annotations/training',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/datasets/vaihingen.py
# dataset settings
dataset_type = 'ISPRSDataset'
data_root = 'data/vaihingen'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=True),
dict(type='Resize', img_scale=(512, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='img_dir/train',
ann_dir='ann_dir/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='img_dir/val',
ann_dir='ann_dir/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='img_dir/val',
ann_dir='ann_dir/val',
pipeline=test_pipeline))
# mmsegmentation-master/configs/_base_/models/ann_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='ANNHead',
in_channels=[1024, 2048],
in_index=[2, 3],
channels=512,
project_channels=256,
query_scales=(1, ),
key_pool_scales=(1, 3, 6, 8),
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/apcnet_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='APCHead',
in_channels=2048,
in_index=3,
channels=512,
pool_scales=(1, 2, 3, 6),
dropout_ratio=0.1,
num_classes=19,
        norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/bisenetv1_r18-d32.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
backbone=dict(
type='BiSeNetV1',
in_channels=3,
context_channels=(128, 256, 512),
spatial_channels=(64, 64, 64, 128),
out_indices=(0, 1, 2),
out_channels=256,
backbone_cfg=dict(
type='ResNet',
in_channels=3,
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 1, 1),
strides=(1, 2, 2, 2),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
norm_cfg=norm_cfg,
align_corners=False,
init_cfg=None),
decode_head=dict(
type='FCNHead',
in_channels=256,
in_index=0,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=[
dict(
type='FCNHead',
in_channels=128,
channels=64,
num_convs=1,
num_classes=19,
in_index=1,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
dict(
type='FCNHead',
in_channels=128,
channels=64,
num_convs=1,
num_classes=19,
in_index=2,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
],
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/bisenetv2.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='BiSeNetV2',
detail_channels=(64, 64, 128),
semantic_channels=(16, 32, 64, 128),
semantic_expansion_ratio=6,
bga_channels=128,
out_indices=(0, 1, 2, 3, 4),
init_cfg=None,
align_corners=False),
decode_head=dict(
type='FCNHead',
in_channels=128,
in_index=0,
channels=1024,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
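    # BiSeNetV2's 'booster': four auxiliary FCN heads supervise successive
    # semantic-branch stages (in_index 1-4); they are used only during training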
auxiliary_head=[
dict(
type='FCNHead',
in_channels=16,
channels=16,
num_convs=2,
num_classes=19,
in_index=1,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
dict(
type='FCNHead',
in_channels=32,
channels=64,
num_convs=2,
num_classes=19,
in_index=2,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
dict(
type='FCNHead',
in_channels=64,
channels=256,
num_convs=2,
num_classes=19,
in_index=3,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
dict(
type='FCNHead',
in_channels=128,
channels=1024,
num_convs=2,
num_classes=19,
in_index=4,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
],
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/ccnet_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='CCHead',
in_channels=2048,
in_index=3,
channels=512,
recurrence=2,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/cgnet.py
# model settings
norm_cfg = dict(type='SyncBN', eps=1e-03, requires_grad=True)
model = dict(
type='EncoderDecoder',
backbone=dict(
type='CGNet',
norm_cfg=norm_cfg,
in_channels=3,
num_channels=(32, 64, 128),
num_blocks=(3, 21),
dilations=(2, 4),
reductions=(8, 16)),
decode_head=dict(
type='FCNHead',
in_channels=256,
in_index=2,
channels=256,
num_convs=0,
concat_input=False,
dropout_ratio=0,
num_classes=19,
norm_cfg=norm_cfg,
loss_decode=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0,
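            # hand-tuned weights for the 19 Cityscapes classes; frequent
            # classes (e.g. road, the first entry) receive smaller weights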
class_weight=[
2.5959933, 6.7415504, 3.5354059, 9.8663225, 9.690899, 9.369352,
10.289121, 9.953208, 4.3097677, 9.490387, 7.674431, 9.396905,
10.347791, 6.3927646, 10.226669, 10.241062, 10.280587,
10.396974, 10.055647
])),
# model training and testing settings
train_cfg=dict(sampler=None),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/danet_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='DAHead',
in_channels=2048,
in_index=3,
channels=512,
pam_channels=64,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/deeplabv3_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='ASPPHead',
in_channels=2048,
in_index=3,
channels=512,
dilations=(1, 12, 24, 36),
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/deeplabv3_unet_s5-d16.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='UNet',
in_channels=3,
base_channels=64,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, True),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1),
with_cp=False,
conv_cfg=None,
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU'),
upsample_cfg=dict(type='InterpConv'),
norm_eval=False),
decode_head=dict(
type='ASPPHead',
in_channels=64,
in_index=4,
channels=16,
dilations=(1, 12, 24, 36),
dropout_ratio=0.1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=128,
in_index=3,
channels=64,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
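    # sliding-window inference: 256x256 windows with stride 170 (86 px overlap)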
test_cfg=dict(mode='slide', crop_size=(256, 256), stride=(170, 170)))
# mmsegmentation-master/configs/_base_/models/deeplabv3plus_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='DepthwiseSeparableASPPHead',
in_channels=2048,
in_index=3,
channels=512,
dilations=(1, 12, 24, 36),
c1_in_channels=256,
c1_channels=48,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/dmnet_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='DMHead',
in_channels=2048,
in_index=3,
channels=512,
filter_sizes=(1, 3, 5, 7),
dropout_ratio=0.1,
num_classes=19,
        norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/dnl_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='DNLHead',
in_channels=2048,
in_index=3,
channels=512,
dropout_ratio=0.1,
reduction=2,
use_scale=True,
mode='embedded_gaussian',
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/dpt_vit-b16.py
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='pretrain/vit-b16_p16_224-80ecf9dd.pth', # noqa
backbone=dict(
type='VisionTransformer',
img_size=224,
embed_dims=768,
num_layers=12,
num_heads=12,
out_indices=(2, 5, 8, 11),
final_norm=False,
with_cls_token=True,
output_cls_token=True),
decode_head=dict(
type='DPTHead',
in_channels=(768, 768, 768, 768),
channels=256,
embed_dims=768,
post_process_channels=[96, 192, 384, 768],
num_classes=150,
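        # the ViT cls token (kept via output_cls_token above) is fused into the
        # patch features by the 'project' readout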
readout_type='project',
input_transform='multiple_select',
in_index=(0, 1, 2, 3),
norm_cfg=norm_cfg,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=None,
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole')) # yapf: disable
# mmsegmentation-master/configs/_base_/models/emanet_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='EMAHead',
in_channels=2048,
in_index=3,
channels=256,
ema_channels=512,
num_bases=64,
num_stages=3,
momentum=0.1,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/encnet_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='EncHead',
in_channels=[512, 1024, 2048],
in_index=(1, 2, 3),
channels=512,
num_codes=32,
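        # the semantic-encoding (SE) loss predicts which classes are present in
        # the whole image; it is weighted by loss_se_decode below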
use_se_loss=True,
add_lateral=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_se_decode=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/erfnet_fcn.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='ERFNet',
in_channels=3,
enc_downsample_channels=(16, 64, 128),
enc_stage_non_bottlenecks=(5, 8),
enc_non_bottleneck_dilations=(2, 4, 8, 16),
enc_non_bottleneck_channels=(64, 128),
dec_upsample_channels=(64, 16),
dec_stages_non_bottleneck=(2, 2),
dec_non_bottleneck_channels=(64, 16),
dropout_ratio=0.1,
init_cfg=None),
decode_head=dict(
type='FCNHead',
in_channels=16,
channels=128,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/fast_scnn.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
model = dict(
type='EncoderDecoder',
backbone=dict(
type='FastSCNN',
downsample_dw_channels=(32, 48),
global_in_channels=64,
global_block_channels=(64, 96, 128),
global_block_strides=(2, 2, 1),
global_out_channels=128,
higher_in_channels=64,
lower_in_channels=128,
fusion_out_channels=128,
out_indices=(0, 1, 2),
norm_cfg=norm_cfg,
align_corners=False),
decode_head=dict(
type='DepthwiseSeparableFCNHead',
in_channels=128,
channels=128,
concat_input=False,
num_classes=19,
in_index=-1,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1)),
auxiliary_head=[
dict(
type='FCNHead',
in_channels=128,
channels=32,
num_convs=1,
num_classes=19,
in_index=-2,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
dict(
type='FCNHead',
in_channels=64,
channels=32,
num_convs=1,
num_classes=19,
in_index=-3,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
],
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/fastfcn_r50-d32_jpu_psp.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
dilations=(1, 1, 2, 4),
strides=(1, 2, 2, 2),
out_indices=(1, 2, 3),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
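    # JPU (Joint Pyramid Upsampling) recovers high-resolution features from the
    # strided (d32) backbone, replacing expensive dilated convolution stages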
neck=dict(
type='JPU',
in_channels=(512, 1024, 2048),
mid_channels=512,
start_level=0,
end_level=-1,
dilations=(1, 2, 4, 8),
align_corners=False,
norm_cfg=norm_cfg),
decode_head=dict(
type='PSPHead',
in_channels=2048,
in_index=2,
channels=512,
pool_scales=(1, 2, 3, 6),
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=1,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/fcn_hr18.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://msra/hrnetv2_w18',
backbone=dict(
type='HRNet',
norm_cfg=norm_cfg,
norm_eval=False,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(18, 36)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(18, 36, 72)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(18, 36, 72, 144)))),
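    # the four HRNet branch outputs are resized and concatenated
    # (input_transform='resize_concat'), hence channels = 18+36+72+144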
decode_head=dict(
type='FCNHead',
in_channels=[18, 36, 72, 144],
in_index=(0, 1, 2, 3),
channels=sum([18, 36, 72, 144]),
input_transform='resize_concat',
kernel_size=1,
num_convs=1,
concat_input=False,
dropout_ratio=-1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/fcn_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='FCNHead',
in_channels=2048,
in_index=3,
channels=512,
num_convs=2,
concat_input=True,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/fcn_unet_s5-d16.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='UNet',
in_channels=3,
base_channels=64,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, True),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1),
with_cp=False,
conv_cfg=None,
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU'),
upsample_cfg=dict(type='InterpConv'),
norm_eval=False),
decode_head=dict(
type='FCNHead',
in_channels=64,
in_index=4,
channels=64,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=128,
in_index=3,
channels=64,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='slide', crop_size=(256, 256), stride=(170, 170)))
# mmsegmentation-master/configs/_base_/models/fpn_poolformer_s12.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s12_3rdparty_32xb128_in1k_20220414-f8d83051.pth' # noqa
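# PoolFormer is implemented in mmcls; custom_imports registers mmcls.models so
# that the 'mmcls.PoolFormer' backbone type below resolves at build time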
custom_imports = dict(imports='mmcls.models', allow_failed_imports=False)
model = dict(
type='EncoderDecoder',
backbone=dict(
type='mmcls.PoolFormer',
arch='s12',
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file, prefix='backbone.'),
in_patch_size=7,
in_stride=4,
in_pad=2,
down_patch_size=3,
down_stride=2,
down_pad=1,
drop_rate=0.,
drop_path_rate=0.,
out_indices=(0, 2, 4, 6),
frozen_stages=0,
),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=4),
decode_head=dict(
type='FPNHead',
in_channels=[256, 256, 256, 256],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# mmsegmentation-master/configs/_base_/models/fpn_r50.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 1, 1),
strides=(1, 2, 2, 2),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=4),
decode_head=dict(
type='FPNHead',
in_channels=[256, 256, 256, 256],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
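
All of these bases set norm_cfg to SyncBN, which only runs inside a distributed process group. For a single-device smoke test the norm layers have to be converted back to plain BN first; a sketch, assuming a recent mmcv-full that ships revert_sync_batchnorm:

import torch
import mmcv
from mmcv.cnn.utils import revert_sync_batchnorm
from mmseg.models import build_segmentor

cfg = mmcv.Config.fromfile('configs/_base_/models/fpn_r50.py')
cfg.model.pretrained = None              # skip the checkpoint download
model = build_segmentor(
    cfg.model,
    train_cfg=cfg.get('train_cfg'),
    test_cfg=cfg.get('test_cfg'))
model = revert_sync_batchnorm(model)     # SyncBN -> BatchNorm2d
model.eval()

img = torch.randn(1, 3, 512, 1024)
img_meta = [dict(ori_shape=(512, 1024, 3), img_shape=(512, 1024, 3),
                 pad_shape=(512, 1024, 3), scale_factor=1.0, flip=False)]
with torch.no_grad():
    pred = model.forward_test([img], [img_meta])
print(pred[0].shape)                     # (512, 1024) label map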
mmsegmentation | mmsegmentation-master/configs/_base_/models/gcnet_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
        type='GCHead',  # GCNet global-context block on the stage-4 features
in_channels=2048,
in_index=3,
channels=512,
ratio=1 / 4.,
pooling_type='att',
fusion_types=('channel_add', ),
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
mmsegmentation | mmsegmentation-master/configs/_base_/models/icnet_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
backbone=dict(
type='ICNet',
backbone_cfg=dict(
type='ResNetV1c',
in_channels=3,
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
in_channels=3,
layer_channels=(512, 2048),
light_branch_middle_channels=32,
psp_out_channels=512,
out_channels=(64, 256, 256),
norm_cfg=norm_cfg,
align_corners=False,
),
neck=dict(
type='ICNeck',
in_channels=(64, 256, 256),
out_channels=128,
norm_cfg=norm_cfg,
align_corners=False),
decode_head=dict(
type='FCNHead',
in_channels=128,
channels=128,
num_convs=1,
in_index=2,
dropout_ratio=0,
num_classes=19,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # auxiliary heads on the two intermediate ICNeck fusion outputs (feature
    # indices 0 and 1), each adding a 0.4-weighted deep-supervision loss
    auxiliary_head=[
dict(
type='FCNHead',
in_channels=128,
channels=128,
num_convs=1,
num_classes=19,
in_index=0,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
dict(
type='FCNHead',
in_channels=128,
channels=128,
num_convs=1,
num_classes=19,
in_index=1,
norm_cfg=norm_cfg,
concat_input=False,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
],
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
mmsegmentation | mmsegmentation-master/configs/_base_/models/isanet_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
        type='ISAHead',  # interlaced sparse self-attention over the feature map
in_channels=2048,
in_index=3,
channels=512,
isa_channels=256,
down_factor=(8, 8),
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
mmsegmentation | mmsegmentation-master/configs/_base_/models/lraspp_m-v3-d8.py
# model settings
norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True)
model = dict(
type='EncoderDecoder',
backbone=dict(
type='MobileNetV3',
arch='large',
out_indices=(1, 3, 16),
norm_cfg=norm_cfg),
decode_head=dict(
        type='LRASPPHead',  # Lite R-ASPP head fusing three MobileNetV3 stages
in_channels=(16, 24, 960),
in_index=(0, 1, 2),
channels=128,
input_transform='multiple_select',
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU'),
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
mmsegmentation | mmsegmentation-master/configs/_base_/models/nonlocal_r50-d8.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
        type='NLHead',  # non-local (embedded Gaussian) context block
in_channels=2048,
in_index=3,
channels=512,
dropout_ratio=0.1,
reduction=2,
use_scale=True,
mode='embedded_gaussian',
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
mmsegmentation | mmsegmentation-master/configs/_base_/models/ocrnet_hr18.py
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='CascadeEncoderDecoder',
num_stages=2,
pretrained='open-mmlab://msra/hrnetv2_w18',
backbone=dict(
type='HRNet',
norm_cfg=norm_cfg,
norm_eval=False,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(18, 36)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(18, 36, 72)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(18, 36, 72, 144)))),
    # cascade of two heads: a coarse FCN prediction over the concatenated
    # HRNet branches, refined by the OCR head's object-contextual features
    decode_head=[
dict(
type='FCNHead',
in_channels=[18, 36, 72, 144],
channels=sum([18, 36, 72, 144]),
in_index=(0, 1, 2, 3),
input_transform='resize_concat',
kernel_size=1,
num_convs=1,
concat_input=False,
dropout_ratio=-1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
dict(
type='OCRHead',
in_channels=[18, 36, 72, 144],
in_index=(0, 1, 2, 3),
input_transform='resize_concat',
channels=512,
ocr_channels=256,
dropout_ratio=-1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
],
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
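
With CascadeEncoderDecoder and num_stages=2, both heads above are trained jointly and each cross-entropy term is scaled by its head's loss_weight, so the OCR refinement dominates the objective. A runnable toy illustration of that combination (dummy tensors, not mmseg internals):

import torch
import torch.nn.functional as F

num_classes = 19
gt = torch.randint(0, num_classes, (2, 64, 64))       # dummy labels
coarse = torch.randn(2, num_classes, 64, 64)          # stage 0: FCNHead
refined = torch.randn(2, num_classes, 64, 64)         # stage 1: OCRHead

loss_decode_0 = 0.4 * F.cross_entropy(coarse, gt)     # loss_weight=0.4
loss_decode_1 = 1.0 * F.cross_entropy(refined, gt)    # loss_weight=1.0
total_loss = loss_decode_0 + loss_decode_1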